time-event.c

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/jiffies.h>
#include <net/mac80211.h>

#include "fw/notif-wait.h"
#include "iwl-trans.h"
#include "fw-api.h"
#include "time-event.h"
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"

/*
 * For the high priority TE use a time event type that has similar priority to
 * the FW's action scan priority.
 */
#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC

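/*
 * Clear a time event's bookkeeping and unlink it from its list.
 * Caller must hold mvm->time_event_lock; does nothing if the time
 * event is not in use (no vif assigned).
 */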
void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
			   struct iwl_mvm_time_event_data *te_data)
{
	lockdep_assert_held(&mvm->time_event_lock);

	if (!te_data->vif)
		return;

	list_del(&te_data->list);
	te_data->running = false;
	te_data->uid = 0;
	te_data->id = TE_MAX;
	te_data->vif = NULL;
}

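/*
 * Deferred cleanup that runs once a remain-on-channel session is over:
 * clears the ROC running status bits, drops the ROC references and
 * flushes frames that were queued for the off-channel time event.
 */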
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
	u32 queues = 0;

	/*
	 * Clear the ROC_RUNNING / ROC_AUX_RUNNING status bits.
	 * This will cause the TX path to drop offchannel transmissions.
	 * That would also be done by mac80211, but it is racy, in particular
	 * in the case that the time event actually completed in the firmware
	 * (which is handled in iwl_mvm_te_handle_notif).
	 */
	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
		queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
	}
	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
		queues |= BIT(mvm->aux_queue);
		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
	}

	synchronize_net();

	/*
	 * Flush the offchannel queue -- this is called when the time
	 * event finishes or is canceled, so that frames queued for it
	 * won't get stuck on the queue and be transmitted in the next
	 * time event.
	 * We have to send the command asynchronously since this cannot
	 * be under the mutex for locking reasons, but that's not an
	 * issue as it will have to complete before the next command is
	 * executed, and a new time event means a new command.
	 */
	iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
}

static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
{
	/*
	 * Of course, our status bit is just as racy as mac80211, so in
	 * addition, fire off the work struct which will drop all frames
	 * from the hardware queues that made it through the race. First
	 * it will of course synchronize the TX path to make sure that
	 * any *new* TX will be rejected.
	 */
	schedule_work(&mvm->roc_done_wk);
}

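/*
 * Called when the channel switch NoA time event starts on an AP
 * interface: if all CSA beacons have been transmitted, tell mac80211 to
 * complete the switch.
 */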
static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
{
	struct ieee80211_vif *csa_vif;

	rcu_read_lock();

	csa_vif = rcu_dereference(mvm->csa_vif);
	if (!csa_vif || !csa_vif->csa_active)
		goto out_unlock;

	IWL_DEBUG_TE(mvm, "CSA NOA started\n");

	/*
	 * CSA NoA is started but we still have beacons to
	 * transmit on the current channel.
	 * So we just do nothing here and the switch
	 * will be performed on the last TBTT.
	 */
	if (!ieee80211_csa_is_complete(csa_vif)) {
		IWL_WARN(mvm, "CSA NOA started too early\n");
		goto out_unlock;
	}

	ieee80211_csa_finish(csa_vif);

	rcu_read_unlock();

	RCU_INIT_POINTER(mvm->csa_vif, NULL);

	return;

out_unlock:
	rcu_read_unlock();
}

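/*
 * If a station interface is still not fully associated (no DTIM period
 * known) when its time event fails or ends, report a connection loss.
 * Returns true if a disconnection was triggered.
 */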
static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					const char *errmsg)
{
	if (vif->type != NL80211_IFTYPE_STATION)
		return false;
	if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
		return false;
	if (errmsg)
		IWL_ERR(mvm, "%s\n", errmsg);

	iwl_mvm_connection_loss(mvm, vif, errmsg);
	return true;
}

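/*
 * Handle the notification for the channel switch time event: start the
 * NoA flow on AP interfaces or complete the switch on station
 * interfaces; a start failure is flagged (AP) or reported as a
 * connection loss (station).
 */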
static void
iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
			     struct iwl_mvm_time_event_data *te_data,
			     struct iwl_time_event_notif *notif)
{
	struct ieee80211_vif *vif = te_data->vif;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (!notif->status)
		IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");

	switch (te_data->vif->type) {
	case NL80211_IFTYPE_AP:
		if (!notif->status)
			mvmvif->csa_failed = true;
		iwl_mvm_csa_noa_start(mvm);
		break;
	case NL80211_IFTYPE_STATION:
		if (!notif->status) {
			iwl_mvm_connection_loss(mvm, vif,
						"CSA TE failed to start");
			break;
		}
		iwl_mvm_csa_client_absent(mvm, te_data->vif);
		ieee80211_chswitch_done(te_data->vif, true);
		break;
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		break;
	}

	/* we don't need it anymore */
	iwl_mvm_te_clear_data(mvm, te_data);
}

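/*
 * Check whether this time event notification matches one of the
 * configured firmware debug triggers and, if so, collect a dump.
 */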
static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
				     struct iwl_time_event_notif *notif,
				     struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_time_event *te_trig;
	int i;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
	te_trig = (void *)trig->data;

	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
					   ieee80211_vif_to_wdev(te_data->vif),
					   trig))
		return;

	for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
		u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
		u32 trig_action_bitmap =
			le32_to_cpu(te_trig->time_events[i].action_bitmap);
		u32 trig_status_bitmap =
			le32_to_cpu(te_trig->time_events[i].status_bitmap);

		if (trig_te_id != te_data->id ||
		    !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
		    !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"Time event %d Action 0x%x received status: %d",
					te_data->id,
					le32_to_cpu(notif->action),
					le32_to_cpu(notif->status));
		break;
	}
}

/*
 * Handles a FW notification for an event that is known to the driver.
 *
 * @mvm: the mvm component
 * @te_data: the time event data
 * @notif: the notification data corresponding to the time event data.
 */
static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
				    struct iwl_mvm_time_event_data *te_data,
				    struct iwl_time_event_notif *notif)
{
	lockdep_assert_held(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	iwl_mvm_te_check_trigger(mvm, notif, te_data);

	/*
	 * The FW sends the start/end time event notifications even for events
	 * that it fails to schedule. This is indicated in the status field of
	 * the notification. This happens in cases where the scheduler cannot
	 * find a schedule that can handle the event (for example requesting
	 * P2P Device discoverability while there are other higher priority
	 * events in the system).
	 */
	if (!le32_to_cpu(notif->status)) {
		const char *msg;

		if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
			msg = "Time Event start notification failure";
		else
			msg = "Time Event end notification failure";

		IWL_DEBUG_TE(mvm, "%s\n", msg);

		if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
			iwl_mvm_te_clear_data(mvm, te_data);
			return;
		}
	}

	if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
		IWL_DEBUG_TE(mvm,
			     "TE ended - current time %lu, estimated end %lu\n",
			     jiffies, te_data->end_jiffies);

		switch (te_data->vif->type) {
		case NL80211_IFTYPE_P2P_DEVICE:
			ieee80211_remain_on_channel_expired(mvm->hw);
			iwl_mvm_roc_finished(mvm);
			break;
		case NL80211_IFTYPE_STATION:
			/*
			 * By now, we should have finished association
			 * and know the dtim period.
			 */
			iwl_mvm_te_check_disconnect(mvm, te_data->vif,
				"No association and the time event is over already...");
			break;
		default:
			break;
		}

		iwl_mvm_te_clear_data(mvm, te_data);
	} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
		te_data->running = true;
		te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);

		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
			iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
			ieee80211_ready_on_channel(mvm->hw);
		} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
			iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
		}
	} else {
		IWL_WARN(mvm, "Got TE with unknown action\n");
	}
}

/*
 * Handle an Aux ROC time event
 */
static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
					   struct iwl_time_event_notif *notif)
{
	struct iwl_mvm_time_event_data *te_data, *tmp;
	bool aux_roc_te = false;

	list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid) {
			aux_roc_te = true;
			break;
		}
	}
	if (!aux_roc_te) /* Not an Aux ROC time event */
		return -EINVAL;

	iwl_mvm_te_check_trigger(mvm, notif, te_data);

	IWL_DEBUG_TE(mvm,
		     "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action), le32_to_cpu(notif->status));

	if (!le32_to_cpu(notif->status) ||
	    le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
		/* End TE, notify mac80211 */
		ieee80211_remain_on_channel_expired(mvm->hw);
		iwl_mvm_roc_finished(mvm); /* flush aux queue */
		list_del(&te_data->list); /* remove from list */
		te_data->running = false;
		te_data->vif = NULL;
		te_data->uid = 0;
		te_data->id = TE_MAX;
	} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
		set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
		te_data->running = true;
		iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
	} else {
		IWL_DEBUG_TE(mvm,
			     "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
			     le32_to_cpu(notif->action));
		return -EINVAL;
	}

	return 0;
}

/*
 * The Rx handler for time event notifications
 */
void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_time_event_notif *notif = (void *)pkt->data;
	struct iwl_mvm_time_event_data *te_data, *tmp;

	IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	spin_lock_bh(&mvm->time_event_lock);

	/* This time event is triggered for Aux ROC request */
	if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
		goto unlock;

	list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid)
			iwl_mvm_te_handle_notif(mvm, te_data, notif);
	}
unlock:
	spin_unlock_bh(&mvm->time_event_lock);
}

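/*
 * Notification-wait callback for TIME_EVENT_NOTIFICATION: matches the
 * notification against the time event we are waiting for by UID.
 */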
static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
			     struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_notif *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* te_data->uid is already set in the TIME_EVENT_CMD response */
	if (le32_to_cpu(resp->unique_id) != te_data->uid)
		return false;

	IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
		     te_data->uid);
	if (!resp->status)
		IWL_ERR(mvm,
			"TIME_EVENT_NOTIFICATION received but not executed\n");

	return true;
}

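/*
 * Notification-wait callback for the TIME_EVENT_CMD response: stores
 * the unique ID the firmware assigned to the newly added time event.
 */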
static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
					struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_resp *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* we should never get a response to another TIME_EVENT_CMD here */
	if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
		return false;

	te_data->uid = le32_to_cpu(resp->unique_id);
	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
		     te_data->uid);
	return true;
}

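/*
 * Add a new time event: link te_data into the time event list and send
 * TIME_EVENT_CMD, using a notification wait so that the UID from the
 * command response is captured in the RX path.
 */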
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_time_event_data *te_data,
				       struct iwl_time_event_cmd *te_cmd)
{
	static const u16 time_event_response[] = { TIME_EVENT_CMD };
	struct iwl_notification_wait wait_time_event;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
		     le32_to_cpu(te_cmd->duration));

	spin_lock_bh(&mvm->time_event_lock);
	if (WARN_ON(te_data->id != TE_MAX)) {
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}
	te_data->vif = vif;
	te_data->duration = le32_to_cpu(te_cmd->duration);
	te_data->id = le32_to_cpu(te_cmd->id);
	list_add_tail(&te_data->list, &mvm->time_event_list);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
				   time_event_response,
				   ARRAY_SIZE(time_event_response),
				   iwl_mvm_time_event_response, te_data);

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(*te_cmd), te_cmd);
	if (ret) {
		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
		goto out_clear_te;
	}

	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
	/* should never fail */
	WARN_ON_ONCE(ret);

	if (ret) {
 out_clear_te:
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);
		spin_unlock_bh(&mvm->time_event_lock);
	}
	return ret;
}

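/*
 * Schedule (or extend) session protection: a TE_BSS_STA_AGGRESSIVE_ASSOC
 * time event that keeps the device on the operating channel; if an
 * existing event doesn't have at least min_duration left, it is
 * replaced by a new one.
 */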
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     u32 duration, u32 min_duration,
			     u32 max_delay, bool wait_for_notif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
	struct iwl_notification_wait wait_te_notif;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		return;
	}

	if (te_data->running) {
		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
			     te_data->uid,
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		/*
		 * We don't have enough time; cancel the current TE and
		 * issue a new one. Of course it would be better to remove
		 * the old one only when the new one is added, but we don't
		 * care if we are off channel for a bit. All we need to do
		 * is not to return before we actually begin to be on the
		 * channel.
		 */
		iwl_mvm_stop_session_protection(mvm, vif);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

	time_cmd.apply_time = cpu_to_le32(0);

	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.max_delay = cpu_to_le32(max_delay);
	/* TODO: why do we need to set interval = bi if it is not periodic? */
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      T2_V2_START_IMMEDIATELY);

	if (!wait_for_notif) {
		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
		return;
	}

	/*
	 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
	 * right after we send the time event
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
				   te_notif_response,
				   ARRAY_SIZE(te_notif_response),
				   iwl_mvm_te_notif, te_data);

	/* If TE was sent OK - wait for the notification that started */
	if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
		IWL_ERR(mvm, "Failed to add TE to protect session\n");
		iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
					 TU_TO_JIFFIES(max_delay))) {
		IWL_ERR(mvm, "Failed to protect session until TE\n");
	}
}

static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
					struct iwl_mvm_time_event_data *te_data,
					u32 *uid)
{
	u32 id;

	/*
	 * It is possible that by the time we got to this point the time
	 * event was already removed.
	 */
	spin_lock_bh(&mvm->time_event_lock);

	/* Save time event uid before clearing its data */
	*uid = te_data->uid;
	id = te_data->id;

	/*
	 * The clear_data function handles time events that were already removed
	 */
	iwl_mvm_te_clear_data(mvm, te_data);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * It is possible that by the time we try to remove it, the time event
	 * has already ended and been removed. In such a case there is no need
	 * to send a removal command.
	 */
	if (id == TE_MAX) {
		IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
		return false;
	}

	return true;
}

/*
 * Explicit request to remove an aux ROC time event. The removal of a time
 * event needs to be synchronized with the flow of a time event's end
 * notification, which also removes the time event from the op mode
 * data structures.
 */
static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
				      struct iwl_mvm_vif *mvmvif,
				      struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_hs20_roc_req aux_cmd = {};
	u32 uid;
	int ret;

	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
		return;

	aux_cmd.event_unique_id = cpu_to_le32(uid);
	aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	aux_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
		     le32_to_cpu(aux_cmd.event_unique_id));
	ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
				   sizeof(aux_cmd), &aux_cmd);

	if (WARN_ON(ret))
		return;
}

/*
 * Explicit request to remove a time event. The removal of a time event needs to
 * be synchronized with the flow of a time event's end notification, which also
 * removes the time event from the op mode data structures.
 */
void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
			       struct iwl_mvm_vif *mvmvif,
			       struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_time_event_cmd time_cmd = {};
	u32 uid;
	int ret;

	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
		return;

	/* When we remove a TE, the UID is to be set in the id field */
	time_cmd.id = cpu_to_le32(uid);
	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(time_cmd), &time_cmd);
	if (WARN_ON(ret))
		return;
}

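/*
 * Cancel the session protection time event, if one is scheduled for
 * this interface.
 */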
void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	u32 id;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);
	id = te_data->id;
	spin_unlock_bh(&mvm->time_event_lock);

	if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
		IWL_DEBUG_TE(mvm,
			     "don't remove TE with id=%u (not session protection)\n",
			     id);
		return;
	}

	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}

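/*
 * Schedule a P2P Device remain-on-channel as a time event, mapping the
 * mac80211 ROC type to a time event type with a matching priority.
 */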
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  int duration, enum ieee80211_roc_type type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);
	if (te_data->running) {
		IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
		return -EBUSY;
	}

	/*
	 * Flush the done work, just in case it's still pending, so that
	 * the work it does can complete and we can accept new frames.
	 */
	flush_work(&mvm->roc_done_wk);

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	switch (type) {
	case IEEE80211_ROC_TYPE_NORMAL:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
		break;
	case IEEE80211_ROC_TYPE_MGMT_TX:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
		break;
	default:
		WARN_ONCE(1, "Got an invalid ROC type\n");
		return -EINVAL;
	}

	time_cmd.apply_time = cpu_to_le32(0);
	time_cmd.interval = cpu_to_le32(1);

	/*
	 * The P2P Device TEs can have lower priority than other events
	 * that are being scheduled by the driver/fw, and thus might not be
	 * scheduled. To improve the chances of them being scheduled, allow
	 * them to be fragmented, and in addition allow them to be delayed.
	 */
	time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      T2_V2_START_IMMEDIATELY);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}

static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
{
	struct iwl_mvm_time_event_data *te_data;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);

	/*
	 * Iterate over the list of time events and find the time event that is
	 * associated with a P2P_DEVICE interface.
	 * This assumes that a P2P_DEVICE interface can have only a single time
	 * event at any given time and this time event corresponds to a ROC
	 * request
	 */
	list_for_each_entry(te_data, &mvm->time_event_list, list) {
		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
			goto out;
	}

	/* There can only be at most one AUX ROC time event, we just use the
	 * list to simplify/unify code. Remove it if it exists.
	 */
	te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
					   struct iwl_mvm_time_event_data,
					   list);
out:
	spin_unlock_bh(&mvm->time_event_lock);
	return te_data;
}

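/*
 * Drop any outstanding ROC time event from the driver's bookkeeping
 * without sending a removal command to the firmware.
 */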
void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
{
	struct iwl_mvm_time_event_data *te_data;
	u32 uid;

	te_data = iwl_mvm_get_roc_te(mvm);
	if (te_data)
		__iwl_mvm_remove_time_event(mvm, te_data, &uid);
}

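/*
 * Cancel an ongoing remain-on-channel: remove the corresponding time
 * event (P2P Device or Aux) and clean up the off-channel state.
 */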
void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
{
	struct iwl_mvm_vif *mvmvif;
	struct iwl_mvm_time_event_data *te_data;

	te_data = iwl_mvm_get_roc_te(mvm);
	if (!te_data) {
		IWL_WARN(mvm, "No remain on channel event\n");
		return;
	}

	mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);

	if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
	else
		iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);

	iwl_mvm_roc_finished(mvm);
}

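/*
 * Schedule the channel switch (absence) period as a time event starting
 * at apply_time, removing session protection first if it is running.
 */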
int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
				struct ieee80211_vif *vif,
				u32 duration, u32 apply_time)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running) {
		u32 id;

		spin_lock_bh(&mvm->time_event_lock);
		id = te_data->id;
		spin_unlock_bh(&mvm->time_event_lock);

		if (id == TE_CHANNEL_SWITCH_PERIOD) {
			IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
			return -EBUSY;
		}

		/*
		 * Remove the session protection time event to allow the
		 * channel switch. If we got here, we just heard a beacon so
		 * the session protection is not needed anymore anyway.
		 */
		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
	time_cmd.apply_time = cpu_to_le32(apply_time);
	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_ABSENCE);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}