/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_STATION_COUNT;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update)
{
	struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (!update) {
		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
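
/*
 * Note: unlike regular stations, which reuse the vif's per-AC hardware queues
 * (see iwl_mvm_add_sta() below), a TDLS peer gets its own set of data queues,
 * picked from the range below mvm->first_agg_queue so that they never collide
 * with the aggregation queues.
 */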
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac]);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}
static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long sta_msk;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* disable the TDLS STA-specific queues */
	sta_msk = mvmsta->tfd_queue_msk;
	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
		iwl_mvm_disable_txq(mvm, i);
}
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
	int i, ret, sta_id;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;

	/* HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0;
	mvm_sta->tfd_queue_msk = 0;

	/* allocate new queues for a TDLS station */
	if (sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;
	}
	mvm_sta->agg_tids = 0;

	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}
int iwl_mvm_update_sta(struct iwl_mvm *mvm,
		       struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	return iwl_mvm_sta_send_to_fw(mvm, sta, true);
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}
/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}
void iwl_mvm_sta_drained_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
	u8 sta_id;

	/*
	 * The mutex is needed because of the SYNC cmd, but not only: if the
	 * work were to run concurrently with iwl_mvm_rm_sta, it could run
	 * before iwl_mvm_rm_sta sets the station as busy, and exit. Then
	 * iwl_mvm_rm_sta would set the station as busy, and nobody would ever
	 * clean that up later.
	 */
	mutex_lock(&mvm->mutex);

	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
		int ret;
		struct ieee80211_sta *sta =
			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						  lockdep_is_held(&mvm->mutex));

		/*
		 * This station is in use or RCU-removed; the latter happens in
		 * managed mode, where mac80211 removes the station before we
		 * can remove it from firmware (we can only do that after the
		 * MAC is marked unassociated), and possibly while the deauth
		 * frame to disconnect from the AP is still queued. Then, the
		 * station pointer is -ENOENT when the last skb is reclaimed.
		 */
		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
			continue;

		if (PTR_ERR(sta) == -EINVAL) {
			IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
				sta_id);
			continue;
		}

		if (!sta) {
			IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
				sta_id);
			continue;
		}

		WARN_ON(PTR_ERR(sta) != -EBUSY);
		/* This station was removed and we waited until it got drained,
		 * we can now proceed and remove it.
		 */
		ret = iwl_mvm_rm_sta_common(mvm, sta_id);
		if (ret) {
			IWL_ERR(mvm,
				"Couldn't remove sta %d after it was drained\n",
				sta_id);
			continue;
		}
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
		clear_bit(sta_id, mvm->sta_drained);

		if (mvm->tfd_drained[sta_id]) {
			unsigned long i, msk = mvm->tfd_drained[sta_id];

			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
				iwl_mvm_disable_txq(mvm, i);

			mvm->tfd_drained[sta_id] = 0;
			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
				       sta_id, msk);
		}
	}

	mutex_unlock(&mvm->mutex);
}
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == mvm_sta->sta_id) {
		/* flush its queues here since we are freeing mvm_sta */
		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);

		/* if we are associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);

	/*
	 * There are frames pending on the AC queues for this station.
	 * We need to wait until all the frames are drained...
	 */
	if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
				   ERR_PTR(-EBUSY));
		spin_unlock_bh(&mvm_sta->lock);

		/* disable TDLS sta queues on drain complete */
		if (sta->tdls) {
			mvm->tfd_drained[mvm_sta->sta_id] =
							mvm_sta->tfd_queue_msk;
			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
				       mvm_sta->sta_id);
		}

		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	} else {
		spin_unlock_bh(&mvm_sta->lock);

		if (sta->tdls)
			iwl_mvm_tdls_sta_deinit(mvm, sta);

		ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
	}

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}
static int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
				    struct iwl_mvm_int_sta *sta,
				    u32 qmask, enum nl80211_iftype iftype)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
				    struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_STATION_COUNT;
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));

	cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue,
			      IWL_MVM_TX_FIFO_MCAST);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED);
	if (ret)
		return ret;

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
	return ret;
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}
/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;

	lockdep_assert_held(&mvm->mutex);

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
		return -ENOSPC;

	return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					  mvmvif->id, mvmvif->color);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 qmask;

	lockdep_assert_held(&mvm->mutex);

	qmask = iwl_mvm_mac_get_queues_mask(vif);

	/*
	 * The firmware defines the TFD queue mask to only be relevant
	 * for *unicast* queues, so the multicast (CAB) queue shouldn't
	 * be included.
	 */
	if (vif->type == NL80211_IFTYPE_AP)
		qmask &= ~BIT(vif->cab_queue);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
					ieee80211_vif_type_p2p(vif));
}

/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}
#define IWL_MAX_RX_BA_SESSIONS 16

int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start)
{
	struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
			       start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (!ret) {
		if (start)
			mvm->rx_ba_sessions++;
		else if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
	}

	return ret;
}

static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			      int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		mvm_sta->tfd_queue_msk &= ~BIT(queue);
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}
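
/*
 * Both lookup tables below follow the IEEE 802.11 user-priority to
 * access-category mapping: TIDs 1 and 2 map to background, TIDs 0 and 3 to
 * best effort, TIDs 4 and 5 to video, and TIDs 6 and 7 to voice.
 */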
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	int txq_id;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	for (txq_id = mvm->first_agg_queue;
	     txq_id <= mvm->last_agg_queue; txq_id++)
		if (mvm->queue_to_mac80211[txq_id] ==
		    IWL_INVALID_MAC80211_QUEUE)
			break;

	if (txq_id > mvm->last_agg_queue) {
		IWL_ERR(mvm, "Failed to allocate agg queue\n");
		return -EIO;
	}

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	/* the new tx queue is still connected to the same mac80211 queue */
	mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_mac80211_ac[tid]];

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	if (tid_data->ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	int queue, fifo, ret;
	u16 ssn;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	spin_unlock_bh(&mvmsta->lock);

	fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
	if (ret)
		return -EIO;

	iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
			       buf_size, ssn);

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		/* There are still packets for this RA / TID in the HW */
		if (tid_data->ssn != tid_data->next_reclaimed) {
			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
			err = 0;
			break;
		}

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		iwl_mvm_disable_txq(mvm, txq_id);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);
		mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	if (old_state >= IWL_AGG_ON) {
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		iwl_mvm_disable_txq(mvm, tid_data->txq_id);
	}

	mvm->queue_to_mac80211[tid_data->txq_id] =
				IWL_INVALID_MAC80211_QUEUE;

	return 0;
}
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM);

	if (i == STA_KEY_MAX_NUM)
		return STA_KEY_IDX_INVALID;

	__set_bit(i, mvm->fw_key_table);

	return i;
}

static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;

		return mvm_sta->sta_id;
	}

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT)
		return mvmvif->ap_sta_id;

	return IWL_MVM_STATION_COUNT;
}
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *keyconf, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	int i;
	u8 sta_id = mvm_sta->sta_id;

	keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		cmd.tkip_rx_tsc_byte2 = tkip_iv32;
		for (i = 0; i < 5; i++)
			cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_offset = keyconf->hw_key_idx;
	cmd.key_flags = key_flags;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
					   sizeof(cmd), &cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
						  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
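
/*
 * Note: the IGTK receive sequence counter below is taken from mac80211 as a
 * 6-byte big-endian IPN (pn[0] is the most significant byte) and repacked
 * into a little-endian 64-bit value for the MGMT_MCAST_KEY command.
 */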
static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
		    (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
		ieee80211_aes_cmac_calculate_k1_k2(keyconf,
						   igtk_cmd.K1, igtk_cmd.K2);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0);
	}

	return ret;
}

static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_flags = key_flags;
	cmd.key_offset = keyconf->hw_key_idx;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
					  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			bool have_key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	u8 sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	sta_id = iwl_mvm_get_key_sta_id(vif, sta);
	if (sta_id == IWL_MVM_STATION_COUNT) {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	if (!have_key_offset) {
		/*
		 * The D3 firmware hardcodes the PTK offset to 0, so we have to
		 * configure it there. As a result, this workaround exists to
		 * let the caller set the key offset (hw_key_idx), see d3.c.
		 */
		keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm);
		if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
			return -ENOSPC;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast);
	if (ret) {
		__clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
		goto end;
	}

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast);
		if (ret) {
			__clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
		}
	}

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta->addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	u8 sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	sta_id = iwl_mvm_get_key_sta_id(vif, sta);

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	if (sta_id == IWL_MVM_STATION_COUNT) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table,
	 * for example when a GTK is removed (where the sta_id will then be
	 * the AP ID, and no station was passed by mac80211.)
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (!sta) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}
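
/*
 * Note: iwl_mvm_update_tkip_key() below runs under rcu_read_lock() and
 * therefore must not block; the updated phase-1 key is sent with CMD_ASYNC
 * so the driver does not wait for the firmware response in this path.
 */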
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();

	if (!sta) {
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
		if (WARN_ON(IS_ERR_OR_NULL(sta))) {
			rcu_read_unlock();
			return;
		}
	}

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check if all
	 * the queues combined that we're releasing frames from have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(cnt - remaining);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			  struct iwl_rx_cmd_buffer *rxb,
			  struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return 0;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();

	return 0;
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_DISABLE_STA_TX))
		return;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queueing tx for this station,
	 * but don't stop queueing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}