/* time-event.c */
  1. /******************************************************************************
  2. *
  3. * This file is provided under a dual BSD/GPLv2 license. When using or
  4. * redistributing this file, you may do so under either license.
  5. *
  6. * GPL LICENSE SUMMARY
  7. *
  8. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  9. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  10. * Copyright(c) 2017 Intel Deutschland GmbH
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of version 2 of the GNU General Public License as
  14. * published by the Free Software Foundation.
  15. *
  16. * This program is distributed in the hope that it will be useful, but
  17. * WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  19. * General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU General Public License
  22. * along with this program; if not, write to the Free Software
  23. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
  24. * USA
  25. *
  26. * The full GNU General Public License is included in this distribution
  27. * in the file called COPYING.
  28. *
  29. * Contact Information:
  30. * Intel Linux Wireless <linuxwifi@intel.com>
  31. * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  32. *
  33. * BSD LICENSE
  34. *
  35. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  36. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  37. * Copyright(c) 2017 Intel Deutschland GmbH
  38. * All rights reserved.
  39. *
  40. * Redistribution and use in source and binary forms, with or without
  41. * modification, are permitted provided that the following conditions
  42. * are met:
  43. *
  44. * * Redistributions of source code must retain the above copyright
  45. * notice, this list of conditions and the following disclaimer.
  46. * * Redistributions in binary form must reproduce the above copyright
  47. * notice, this list of conditions and the following disclaimer in
  48. * the documentation and/or other materials provided with the
  49. * distribution.
  50. * * Neither the name Intel Corporation nor the names of its
  51. * contributors may be used to endorse or promote products derived
  52. * from this software without specific prior written permission.
  53. *
  54. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  55. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  56. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  57. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  58. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  59. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  60. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  61. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  62. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  63. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  64. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  65. *
  66. *****************************************************************************/
  67. #include <linux/jiffies.h>
  68. #include <net/mac80211.h>
  69. #include "fw/notif-wait.h"
  70. #include "iwl-trans.h"
  71. #include "fw-api.h"
  72. #include "time-event.h"
  73. #include "mvm.h"
  74. #include "iwl-io.h"
  75. #include "iwl-prph.h"
  76. /*
  77. * For the high priority TE use a time event type that has similar priority to
  78. * the FW's action scan priority.
  79. */
  80. #define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
  81. #define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
  82. void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
  83. struct iwl_mvm_time_event_data *te_data)
  84. {
  85. lockdep_assert_held(&mvm->time_event_lock);
  86. if (!te_data->vif)
  87. return;
  88. list_del(&te_data->list);
  89. te_data->running = false;
  90. te_data->uid = 0;
  91. te_data->id = TE_MAX;
  92. te_data->vif = NULL;
  93. }
/*
 * Deferred-work handler run when a remain-on-channel (ROC) time event
 * ends or is cancelled: stop further offchannel TX, then flush frames
 * that were already queued for the finished event.
 */
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);

	/*
	 * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit.
	 * This will cause the TX path to drop offchannel transmissions.
	 * That would also be done by mac80211, but it is racy, in particular
	 * in the case that the time event actually completed in the firmware
	 * (which is handled in iwl_mvm_te_handle_notif).
	 */
	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);

	/* make sure in-flight TX paths have observed the cleared bits */
	synchronize_net();

	/*
	 * Flush the offchannel queue -- this is called when the time
	 * event finishes or is canceled, so that frames queued for it
	 * won't get stuck on the queue and be transmitted in the next
	 * time event.
	 * We have to send the command asynchronously since this cannot
	 * be under the mutex for locking reasons, but that's not an
	 * issue as it will have to complete before the next command is
	 * executed, and a new time event means a new command.
	 */
	iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);

	/* Do the same for the P2P device queue (STA) */
	if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
		struct iwl_mvm_vif *mvmvif;

		/*
		 * NB: access to this pointer would be racy, but the flush bit
		 * can only be set when we had a P2P-Device VIF, and we have a
		 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
		 * not really racy.
		 */
		if (!WARN_ON(!mvm->p2p_device_vif)) {
			mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
			iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
					  CMD_ASYNC);
		}
	}
}
  136. static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
  137. {
  138. /*
  139. * Of course, our status bit is just as racy as mac80211, so in
  140. * addition, fire off the work struct which will drop all frames
  141. * from the hardware queues that made it through the race. First
  142. * it will of course synchronize the TX path to make sure that
  143. * any *new* TX will be rejected.
  144. */
  145. schedule_work(&mvm->roc_done_wk);
  146. }
/*
 * Called when the CSA NoA time event starts on the AP/GO: if the
 * channel-switch countdown has completed, finish the switch now;
 * otherwise leave it to the last TBTT.
 */
static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
{
	struct ieee80211_vif *csa_vif;

	rcu_read_lock();

	csa_vif = rcu_dereference(mvm->csa_vif);
	if (!csa_vif || !csa_vif->csa_active)
		goto out_unlock;

	IWL_DEBUG_TE(mvm, "CSA NOA started\n");

	/*
	 * CSA NoA is started but we still have beacons to
	 * transmit on the current channel.
	 * So we just do nothing here and the switch
	 * will be performed on the last TBTT.
	 */
	if (!ieee80211_csa_is_complete(csa_vif)) {
		IWL_WARN(mvm, "CSA NOA started too early\n");
		goto out_unlock;
	}

	ieee80211_csa_finish(csa_vif);

	rcu_read_unlock();

	/* switch is done; drop the publisher-side pointer */
	RCU_INIT_POINTER(mvm->csa_vif, NULL);

	return;

out_unlock:
	rcu_read_unlock();
}
  172. static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
  173. struct ieee80211_vif *vif,
  174. const char *errmsg)
  175. {
  176. if (vif->type != NL80211_IFTYPE_STATION)
  177. return false;
  178. if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
  179. return false;
  180. if (errmsg)
  181. IWL_ERR(mvm, "%s\n", errmsg);
  182. iwl_mvm_connection_loss(mvm, vif, errmsg);
  183. return true;
  184. }
  185. static void
  186. iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
  187. struct iwl_mvm_time_event_data *te_data,
  188. struct iwl_time_event_notif *notif)
  189. {
  190. struct ieee80211_vif *vif = te_data->vif;
  191. struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
  192. if (!notif->status)
  193. IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
  194. switch (te_data->vif->type) {
  195. case NL80211_IFTYPE_AP:
  196. if (!notif->status)
  197. mvmvif->csa_failed = true;
  198. iwl_mvm_csa_noa_start(mvm);
  199. break;
  200. case NL80211_IFTYPE_STATION:
  201. if (!notif->status) {
  202. iwl_mvm_connection_loss(mvm, vif,
  203. "CSA TE failed to start");
  204. break;
  205. }
  206. iwl_mvm_csa_client_absent(mvm, te_data->vif);
  207. ieee80211_chswitch_done(te_data->vif, true);
  208. break;
  209. default:
  210. /* should never happen */
  211. WARN_ON_ONCE(1);
  212. break;
  213. }
  214. /* we don't need it anymore */
  215. iwl_mvm_te_clear_data(mvm, te_data);
  216. }
/*
 * Fire a firmware debug-data collection if a FW_DBG_TRIGGER_TIME_EVENT
 * trigger is configured and one of its entries matches this time
 * event's id, the notification's action bitmap and its status.
 */
static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
				     struct iwl_time_event_notif *notif,
				     struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_time_event *te_trig;
	int i;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
	te_trig = (void *)trig->data;

	/* honour the trigger's per-vif stop conditions */
	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
					   ieee80211_vif_to_wdev(te_data->vif),
					   trig))
		return;

	for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
		u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
		u32 trig_action_bitmap =
			le32_to_cpu(te_trig->time_events[i].action_bitmap);
		u32 trig_status_bitmap =
			le32_to_cpu(te_trig->time_events[i].status_bitmap);

		/* entry must match the TE id, an action bit and the status */
		if (trig_te_id != te_data->id ||
		    !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
		    !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"Time event %d Action 0x%x received status: %d",
					te_data->id,
					le32_to_cpu(notif->action),
					le32_to_cpu(notif->status));
		break;
	}
}
/*
 * Handles a FW notification for an event that is known to the driver.
 *
 * @mvm: the mvm component
 * @te_data: the time event data
 * @notif: the notification data corresponding the time event data.
 */
static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
				    struct iwl_mvm_time_event_data *te_data,
				    struct iwl_time_event_notif *notif)
{
	lockdep_assert_held(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	iwl_mvm_te_check_trigger(mvm, notif, te_data);

	/*
	 * The FW sends the start/end time event notifications even for events
	 * that it fails to schedule. This is indicated in the status field of
	 * the notification. This happens in cases that the scheduler cannot
	 * find a schedule that can handle the event (for example requesting a
	 * P2P Device discoveribility, while there are other higher priority
	 * events in the system).
	 */
	if (!le32_to_cpu(notif->status)) {
		const char *msg;

		if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
			msg = "Time Event start notification failure";
		else
			msg = "Time Event end notification failure";

		IWL_DEBUG_TE(mvm, "%s\n", msg);

		/* a failed TE on a station may mean assoc can never finish */
		if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
			iwl_mvm_te_clear_data(mvm, te_data);
			return;
		}
	}

	/* NOTE: action is a bitmap -- END is checked before START */
	if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
		IWL_DEBUG_TE(mvm,
			     "TE ended - current time %lu, estimated end %lu\n",
			     jiffies, te_data->end_jiffies);

		switch (te_data->vif->type) {
		case NL80211_IFTYPE_P2P_DEVICE:
			ieee80211_remain_on_channel_expired(mvm->hw);
			iwl_mvm_roc_finished(mvm);
			break;
		case NL80211_IFTYPE_STATION:
			/*
			 * By now, we should have finished association
			 * and know the dtim period.
			 */
			iwl_mvm_te_check_disconnect(mvm, te_data->vif,
				"No association and the time event is over already...");
			break;
		default:
			break;
		}

		iwl_mvm_te_clear_data(mvm, te_data);
	} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
		te_data->running = true;
		te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);

		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
			iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
			ieee80211_ready_on_channel(mvm->hw);
		} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
			iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
		}
	} else {
		IWL_WARN(mvm, "Got TE with unknown action\n");
	}
}
  321. /*
  322. * Handle A Aux ROC time event
  323. */
  324. static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
  325. struct iwl_time_event_notif *notif)
  326. {
  327. struct iwl_mvm_time_event_data *te_data, *tmp;
  328. bool aux_roc_te = false;
  329. list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
  330. if (le32_to_cpu(notif->unique_id) == te_data->uid) {
  331. aux_roc_te = true;
  332. break;
  333. }
  334. }
  335. if (!aux_roc_te) /* Not a Aux ROC time event */
  336. return -EINVAL;
  337. iwl_mvm_te_check_trigger(mvm, notif, te_data);
  338. IWL_DEBUG_TE(mvm,
  339. "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
  340. le32_to_cpu(notif->unique_id),
  341. le32_to_cpu(notif->action), le32_to_cpu(notif->status));
  342. if (!le32_to_cpu(notif->status) ||
  343. le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
  344. /* End TE, notify mac80211 */
  345. ieee80211_remain_on_channel_expired(mvm->hw);
  346. iwl_mvm_roc_finished(mvm); /* flush aux queue */
  347. list_del(&te_data->list); /* remove from list */
  348. te_data->running = false;
  349. te_data->vif = NULL;
  350. te_data->uid = 0;
  351. te_data->id = TE_MAX;
  352. } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
  353. set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
  354. te_data->running = true;
  355. iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
  356. ieee80211_ready_on_channel(mvm->hw); /* Start TE */
  357. } else {
  358. IWL_DEBUG_TE(mvm,
  359. "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
  360. le32_to_cpu(notif->action));
  361. return -EINVAL;
  362. }
  363. return 0;
  364. }
  365. /*
  366. * The Rx handler for time event notifications
  367. */
  368. void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
  369. struct iwl_rx_cmd_buffer *rxb)
  370. {
  371. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  372. struct iwl_time_event_notif *notif = (void *)pkt->data;
  373. struct iwl_mvm_time_event_data *te_data, *tmp;
  374. IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
  375. le32_to_cpu(notif->unique_id),
  376. le32_to_cpu(notif->action));
  377. spin_lock_bh(&mvm->time_event_lock);
  378. /* This time event is triggered for Aux ROC request */
  379. if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
  380. goto unlock;
  381. list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
  382. if (le32_to_cpu(notif->unique_id) == te_data->uid)
  383. iwl_mvm_te_handle_notif(mvm, te_data, notif);
  384. }
  385. unlock:
  386. spin_unlock_bh(&mvm->time_event_lock);
  387. }
  388. static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
  389. struct iwl_rx_packet *pkt, void *data)
  390. {
  391. struct iwl_mvm *mvm =
  392. container_of(notif_wait, struct iwl_mvm, notif_wait);
  393. struct iwl_mvm_time_event_data *te_data = data;
  394. struct iwl_time_event_notif *resp;
  395. int resp_len = iwl_rx_packet_payload_len(pkt);
  396. if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
  397. return true;
  398. if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
  399. IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
  400. return true;
  401. }
  402. resp = (void *)pkt->data;
  403. /* te_data->uid is already set in the TIME_EVENT_CMD response */
  404. if (le32_to_cpu(resp->unique_id) != te_data->uid)
  405. return false;
  406. IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
  407. te_data->uid);
  408. if (!resp->status)
  409. IWL_ERR(mvm,
  410. "TIME_EVENT_NOTIFICATION received but not executed\n");
  411. return true;
  412. }
  413. static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
  414. struct iwl_rx_packet *pkt, void *data)
  415. {
  416. struct iwl_mvm *mvm =
  417. container_of(notif_wait, struct iwl_mvm, notif_wait);
  418. struct iwl_mvm_time_event_data *te_data = data;
  419. struct iwl_time_event_resp *resp;
  420. int resp_len = iwl_rx_packet_payload_len(pkt);
  421. if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
  422. return true;
  423. if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
  424. IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
  425. return true;
  426. }
  427. resp = (void *)pkt->data;
  428. /* we should never get a response to another TIME_EVENT_CMD here */
  429. if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
  430. return false;
  431. te_data->uid = le32_to_cpu(resp->unique_id);
  432. IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
  433. te_data->uid);
  434. return true;
  435. }
/*
 * Send a TIME_EVENT_CMD (ADD) to the firmware and record the UID the
 * firmware assigns to it, via the command-response notification wait.
 *
 * Returns 0 on success or a negative error code; on failure the time
 * event data is cleared again.
 */
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_time_event_data *te_data,
				       struct iwl_time_event_cmd *te_cmd)
{
	static const u16 time_event_response[] = { TIME_EVENT_CMD };
	struct iwl_notification_wait wait_time_event;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
		     le32_to_cpu(te_cmd->duration));

	spin_lock_bh(&mvm->time_event_lock);
	/* an id other than TE_MAX means this te_data is already in use */
	if (WARN_ON(te_data->id != TE_MAX)) {
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}
	te_data->vif = vif;
	te_data->duration = le32_to_cpu(te_cmd->duration);
	te_data->id = le32_to_cpu(te_cmd->id);
	list_add_tail(&te_data->list, &mvm->time_event_list);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
				   time_event_response,
				   ARRAY_SIZE(time_event_response),
				   iwl_mvm_time_event_response, te_data);

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(*te_cmd), te_cmd);
	if (ret) {
		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
		goto out_clear_te;
	}

	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
	/* should never fail */
	WARN_ON_ONCE(ret);
	if (ret) {
		/* NB: the send-failure path jumps into this if-body */
out_clear_te:
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);
		spin_unlock_bh(&mvm->time_event_lock);
	}
	return ret;
}
/*
 * Schedule (or keep) a session-protection time event for @vif so that
 * association can complete.
 *
 * @duration: requested TE duration, in TU
 * @min_duration: keep a running TE if it has at least this much left
 * @max_delay: maximum schedule delay in TU; also used as wait timeout
 * @wait_for_notif: if true, block until the TE start notification
 */
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     u32 duration, u32 min_duration,
			     u32 max_delay, bool wait_for_notif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
	struct iwl_notification_wait wait_te_notif;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	/* a running TE with enough remaining time can simply be kept */
	if (te_data->running &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		return;
	}

	if (te_data->running) {
		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
			     te_data->uid,
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		/*
		 * we don't have enough time
		 * cancel the current TE and issue a new one
		 * Of course it would be better to remove the old one only
		 * when the new one is added, but we don't care if we are off
		 * channel for a bit. All we need to do, is not to return
		 * before we actually begin to be on the channel.
		 */
		iwl_mvm_stop_session_protection(mvm, vif);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

	time_cmd.apply_time = cpu_to_le32(0);

	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.max_delay = cpu_to_le32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      T2_V2_START_IMMEDIATELY);

	if (!wait_for_notif) {
		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
		return;
	}

	/*
	 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
	 * right after we send the time event
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
				   te_notif_response,
				   ARRAY_SIZE(te_notif_response),
				   iwl_mvm_te_notif, te_data);

	/* If TE was sent OK - wait for the notification that started */
	if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
		IWL_ERR(mvm, "Failed to add TE to protect session\n");
		iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
					 TU_TO_JIFFIES(max_delay))) {
		IWL_ERR(mvm, "Failed to protect session until TE\n");
	}
}
  555. static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
  556. struct iwl_mvm_time_event_data *te_data,
  557. u32 *uid)
  558. {
  559. u32 id;
  560. /*
  561. * It is possible that by the time we got to this point the time
  562. * event was already removed.
  563. */
  564. spin_lock_bh(&mvm->time_event_lock);
  565. /* Save time event uid before clearing its data */
  566. *uid = te_data->uid;
  567. id = te_data->id;
  568. /*
  569. * The clear_data function handles time events that were already removed
  570. */
  571. iwl_mvm_te_clear_data(mvm, te_data);
  572. spin_unlock_bh(&mvm->time_event_lock);
  573. /*
  574. * It is possible that by the time we try to remove it, the time event
  575. * has already ended and removed. In such a case there is no need to
  576. * send a removal command.
  577. */
  578. if (id == TE_MAX) {
  579. IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
  580. return false;
  581. }
  582. return true;
  583. }
  584. /*
  585. * Explicit request to remove a aux roc time event. The removal of a time
  586. * event needs to be synchronized with the flow of a time event's end
  587. * notification, which also removes the time event from the op mode
  588. * data structures.
  589. */
  590. static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
  591. struct iwl_mvm_vif *mvmvif,
  592. struct iwl_mvm_time_event_data *te_data)
  593. {
  594. struct iwl_hs20_roc_req aux_cmd = {};
  595. u32 uid;
  596. int ret;
  597. if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
  598. return;
  599. aux_cmd.event_unique_id = cpu_to_le32(uid);
  600. aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
  601. aux_cmd.id_and_color =
  602. cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
  603. IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
  604. le32_to_cpu(aux_cmd.event_unique_id));
  605. ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
  606. sizeof(aux_cmd), &aux_cmd);
  607. if (WARN_ON(ret))
  608. return;
  609. }
  610. /*
  611. * Explicit request to remove a time event. The removal of a time event needs to
  612. * be synchronized with the flow of a time event's end notification, which also
  613. * removes the time event from the op mode data structures.
  614. */
  615. void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
  616. struct iwl_mvm_vif *mvmvif,
  617. struct iwl_mvm_time_event_data *te_data)
  618. {
  619. struct iwl_time_event_cmd time_cmd = {};
  620. u32 uid;
  621. int ret;
  622. if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
  623. return;
  624. /* When we remove a TE, the UID is to be set in the id field */
  625. time_cmd.id = cpu_to_le32(uid);
  626. time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
  627. time_cmd.id_and_color =
  628. cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
  629. IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
  630. ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
  631. sizeof(time_cmd), &time_cmd);
  632. if (WARN_ON(ret))
  633. return;
  634. }
  635. void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
  636. struct ieee80211_vif *vif)
  637. {
  638. struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
  639. struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
  640. u32 id;
  641. lockdep_assert_held(&mvm->mutex);
  642. spin_lock_bh(&mvm->time_event_lock);
  643. id = te_data->id;
  644. spin_unlock_bh(&mvm->time_event_lock);
  645. if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
  646. IWL_DEBUG_TE(mvm,
  647. "don't remove TE with id=%u (not session protection)\n",
  648. id);
  649. return;
  650. }
  651. iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
  652. }
/*
 * Schedule a remain-on-channel time event on the P2P Device interface.
 *
 * @duration: requested time on channel, in ms
 * @type: ROC purpose; selects the TE type (and thus fw priority) used
 *
 * Returns 0 on success, -EBUSY if a ROC is already running, -EINVAL for
 * an unknown ROC type, or the TIME_EVENT_CMD send error.
 */
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  int duration, enum ieee80211_roc_type type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);
	if (te_data->running) {
		IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
		return -EBUSY;
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	switch (type) {
	case IEEE80211_ROC_TYPE_NORMAL:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
		break;
	case IEEE80211_ROC_TYPE_MGMT_TX:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
		break;
	default:
		WARN_ONCE(1, "Got an invalid ROC type\n");
		return -EINVAL;
	}

	time_cmd.apply_time = cpu_to_le32(0);
	time_cmd.interval = cpu_to_le32(1);

	/*
	 * The P2P Device TEs can have lower priority than other events
	 * that are being scheduled by the driver/fw, and thus it might not be
	 * scheduled. To improve the chances of it being scheduled, allow them
	 * to be fragmented, and in addition allow them to be delayed.
	 */
	time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      T2_V2_START_IMMEDIATELY);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
  695. static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
  696. {
  697. struct iwl_mvm_time_event_data *te_data;
  698. lockdep_assert_held(&mvm->mutex);
  699. spin_lock_bh(&mvm->time_event_lock);
  700. /*
  701. * Iterate over the list of time events and find the time event that is
  702. * associated with a P2P_DEVICE interface.
  703. * This assumes that a P2P_DEVICE interface can have only a single time
  704. * event at any given time and this time event coresponds to a ROC
  705. * request
  706. */
  707. list_for_each_entry(te_data, &mvm->time_event_list, list) {
  708. if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
  709. goto out;
  710. }
  711. /* There can only be at most one AUX ROC time event, we just use the
  712. * list to simplify/unify code. Remove it if it exists.
  713. */
  714. te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
  715. struct iwl_mvm_time_event_data,
  716. list);
  717. out:
  718. spin_unlock_bh(&mvm->time_event_lock);
  719. return te_data;
  720. }
  721. void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
  722. {
  723. struct iwl_mvm_time_event_data *te_data;
  724. u32 uid;
  725. te_data = iwl_mvm_get_roc_te(mvm);
  726. if (te_data)
  727. __iwl_mvm_remove_time_event(mvm, te_data, &uid);
  728. }
  729. void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
  730. {
  731. struct iwl_mvm_vif *mvmvif;
  732. struct iwl_mvm_time_event_data *te_data;
  733. te_data = iwl_mvm_get_roc_te(mvm);
  734. if (!te_data) {
  735. IWL_WARN(mvm, "No remain on channel event\n");
  736. return;
  737. }
  738. mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
  739. if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
  740. iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
  741. set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
  742. } else {
  743. iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
  744. }
  745. iwl_mvm_roc_finished(mvm);
  746. }
  747. int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
  748. struct ieee80211_vif *vif,
  749. u32 duration, u32 apply_time)
  750. {
  751. struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
  752. struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
  753. struct iwl_time_event_cmd time_cmd = {};
  754. lockdep_assert_held(&mvm->mutex);
  755. if (te_data->running) {
  756. u32 id;
  757. spin_lock_bh(&mvm->time_event_lock);
  758. id = te_data->id;
  759. spin_unlock_bh(&mvm->time_event_lock);
  760. if (id == TE_CHANNEL_SWITCH_PERIOD) {
  761. IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
  762. return -EBUSY;
  763. }
  764. /*
  765. * Remove the session protection time event to allow the
  766. * channel switch. If we got here, we just heard a beacon so
  767. * the session protection is not needed anymore anyway.
  768. */
  769. iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
  770. }
  771. time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
  772. time_cmd.id_and_color =
  773. cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
  774. time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
  775. time_cmd.apply_time = cpu_to_le32(apply_time);
  776. time_cmd.max_frags = TE_V2_FRAG_NONE;
  777. time_cmd.duration = cpu_to_le32(duration);
  778. time_cmd.repeat = 1;
  779. time_cmd.interval = cpu_to_le32(1);
  780. time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
  781. TE_V2_ABSENCE);
  782. return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
  783. }