/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/jiffies.h>
#include <net/mac80211.h>

#include "fw/notif-wait.h"
#include "iwl-trans.h"
#include "fw-api.h"
#include "time-event.h"
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"

/*
 * For the high priority TE use a time event type that has similar priority to
 * the FW's action scan priority.
 */
#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
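
/*
 * Clear a time event's data and unlink it from the time event list. The
 * caller must hold mvm->time_event_lock; clearing an entry that has no vif
 * (i.e. one that was already cleared) is a no-op.
 */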
void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
                           struct iwl_mvm_time_event_data *te_data)
{
        lockdep_assert_held(&mvm->time_event_lock);

        if (!te_data->vif)
                return;

        list_del(&te_data->list);
        te_data->running = false;
        te_data->uid = 0;
        te_data->id = TE_MAX;
        te_data->vif = NULL;
}
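
/*
 * Deferred work that runs once a remain-on-channel (ROC) time event is
 * over: drop the ROC references, synchronize the TX path and flush the
 * queues that carried the offchannel frames.
 */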
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
        struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);

        /*
         * Clear the ROC_RUNNING / ROC_AUX_RUNNING status bits.
         * This will cause the TX path to drop offchannel transmissions.
         * That would also be done by mac80211, but it is racy, in particular
         * in the case that the time event actually completed in the firmware
         * (which is handled in iwl_mvm_te_handle_notif).
         */
        if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
                iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
        if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
                iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);

        synchronize_net();

        /*
         * Flush the offchannel queue -- this is called when the time
         * event finishes or is canceled, so that frames queued for it
         * won't get stuck on the queue and be transmitted in the next
         * time event.
         * We have to send the command asynchronously since this cannot
         * be under the mutex for locking reasons, but that's not an
         * issue as it will have to complete before the next command is
         * executed, and a new time event means a new command.
         */
        iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);

        /* Do the same for the P2P device queue (STA) */
        if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
                struct iwl_mvm_vif *mvmvif;

                /*
                 * NB: access to this pointer would be racy, but the flush bit
                 * can only be set when we had a P2P-Device VIF, and we have a
                 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
                 * not really racy.
                 */
                if (!WARN_ON(!mvm->p2p_device_vif)) {
                        mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
                        iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
                                          CMD_ASYNC);
                }
        }
}
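
/*
 * Called when a ROC time event is over or is being torn down: schedule
 * roc_done_wk, which performs the cleanup above without holding the locks
 * the callers may hold.
 */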
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
{
        /*
         * Of course, our status bit is just as racy as mac80211, so in
         * addition, fire off the work struct which will drop all frames
         * from the hardware queues that made it through the race. First
         * it will of course synchronize the TX path to make sure that
         * any *new* TX will be rejected.
         */
        schedule_work(&mvm->roc_done_wk);
}
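
/*
 * Start-of-NoA handling for an AP/GO that is switching channels: if the
 * CSA countdown has completed, tell mac80211 to finish the switch;
 * otherwise keep beaconing on the current channel until the last TBTT.
 */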
static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
{
        struct ieee80211_vif *csa_vif;

        rcu_read_lock();

        csa_vif = rcu_dereference(mvm->csa_vif);
        if (!csa_vif || !csa_vif->csa_active)
                goto out_unlock;

        IWL_DEBUG_TE(mvm, "CSA NOA started\n");

        /*
         * CSA NoA is started but we still have beacons to
         * transmit on the current channel.
         * So we just do nothing here and the switch
         * will be performed on the last TBTT.
         */
        if (!ieee80211_csa_is_complete(csa_vif)) {
                IWL_WARN(mvm, "CSA NOA started too early\n");
                goto out_unlock;
        }

        ieee80211_csa_finish(csa_vif);

        rcu_read_unlock();

        RCU_INIT_POINTER(mvm->csa_vif, NULL);

        return;

out_unlock:
        rcu_read_unlock();
}
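
/*
 * On a station interface, decide whether a failed/expired time event should
 * be treated as a connection loss: that is the case while we are still
 * waiting for the first beacon after a channel switch, or while we are not
 * yet associated with a known DTIM period. Returns true if a connection
 * loss was reported to mac80211.
 */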
static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
                                        struct ieee80211_vif *vif,
                                        const char *errmsg)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

        if (vif->type != NL80211_IFTYPE_STATION)
                return false;

        if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc &&
            vif->bss_conf.dtim_period)
                return false;
        if (errmsg)
                IWL_ERR(mvm, "%s\n", errmsg);

        iwl_mvm_connection_loss(mvm, vif, errmsg);
        return true;
}
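
/*
 * Handle the start of a channel-switch time event: on an AP/GO this kicks
 * off the NoA handling above, on a station it marks the client as absent
 * and completes the switch (or reports a connection loss if the event
 * failed to start). The time event data is cleared in all cases.
 */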
static void
iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
                             struct iwl_mvm_time_event_data *te_data,
                             struct iwl_time_event_notif *notif)
{
        struct ieee80211_vif *vif = te_data->vif;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

        if (!notif->status)
                IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");

        switch (te_data->vif->type) {
        case NL80211_IFTYPE_AP:
                if (!notif->status)
                        mvmvif->csa_failed = true;
                iwl_mvm_csa_noa_start(mvm);
                break;
        case NL80211_IFTYPE_STATION:
                if (!notif->status) {
                        iwl_mvm_connection_loss(mvm, vif,
                                                "CSA TE failed to start");
                        break;
                }
                iwl_mvm_csa_client_absent(mvm, te_data->vif);
                ieee80211_chswitch_done(te_data->vif, true);
                break;
        default:
                /* should never happen */
                WARN_ON_ONCE(1);
                break;
        }

        /* we don't need it anymore */
        iwl_mvm_te_clear_data(mvm, te_data);
}
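
/*
 * Check whether this notification matches a configured time-event firmware
 * debug trigger (by id, action bitmap and status bitmap) and, if so,
 * collect firmware debug data.
 */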
static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
                                     struct iwl_time_event_notif *notif,
                                     struct iwl_mvm_time_event_data *te_data)
{
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_time_event *te_trig;
        int i;

        if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
                return;

        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
        te_trig = (void *)trig->data;

        if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
                                           ieee80211_vif_to_wdev(te_data->vif),
                                           trig))
                return;

        for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
                u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
                u32 trig_action_bitmap =
                        le32_to_cpu(te_trig->time_events[i].action_bitmap);
                u32 trig_status_bitmap =
                        le32_to_cpu(te_trig->time_events[i].status_bitmap);

                if (trig_te_id != te_data->id ||
                    !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
                    !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
                        continue;

                iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
                                        "Time event %d Action 0x%x received status: %d",
                                        te_data->id,
                                        le32_to_cpu(notif->action),
                                        le32_to_cpu(notif->status));
                break;
        }
}

/*
 * Handles a FW notification for an event that is known to the driver.
 *
 * @mvm: the mvm component
 * @te_data: the time event data
 * @notif: the notification data corresponding to the time event data.
 */
static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
                                    struct iwl_mvm_time_event_data *te_data,
                                    struct iwl_time_event_notif *notif)
{
        lockdep_assert_held(&mvm->time_event_lock);

        IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
                     le32_to_cpu(notif->unique_id),
                     le32_to_cpu(notif->action));

        iwl_mvm_te_check_trigger(mvm, notif, te_data);

        /*
         * The FW sends the start/end time event notifications even for events
         * that it fails to schedule. This is indicated in the status field of
         * the notification. This happens in cases that the scheduler cannot
         * find a schedule that can handle the event (for example requesting
         * P2P Device discoverability, while there are other higher priority
         * events in the system).
         */
        if (!le32_to_cpu(notif->status)) {
                const char *msg;

                if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
                        msg = "Time Event start notification failure";
                else
                        msg = "Time Event end notification failure";

                IWL_DEBUG_TE(mvm, "%s\n", msg);

                if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
                        iwl_mvm_te_clear_data(mvm, te_data);
                        return;
                }
        }

        if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
                IWL_DEBUG_TE(mvm,
                             "TE ended - current time %lu, estimated end %lu\n",
                             jiffies, te_data->end_jiffies);

                switch (te_data->vif->type) {
                case NL80211_IFTYPE_P2P_DEVICE:
                        ieee80211_remain_on_channel_expired(mvm->hw);
                        iwl_mvm_roc_finished(mvm);
                        break;
                case NL80211_IFTYPE_STATION:
                        /*
                         * By now, we should have finished association
                         * and know the dtim period.
                         */
                        iwl_mvm_te_check_disconnect(mvm, te_data->vif,
                                "No beacon heard and the time event is over already...");
                        break;
                default:
                        break;
                }

                iwl_mvm_te_clear_data(mvm, te_data);
        } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
                te_data->running = true;
                te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);

                if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                        set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
                        iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
                        ieee80211_ready_on_channel(mvm->hw);
                } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
                        iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
                }
        } else {
                IWL_WARN(mvm, "Got TE with unknown action\n");
        }
}

/*
 * Handle an Aux ROC time event
 */
static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
                                           struct iwl_time_event_notif *notif)
{
        struct iwl_mvm_time_event_data *te_data, *tmp;
        bool aux_roc_te = false;

        list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
                if (le32_to_cpu(notif->unique_id) == te_data->uid) {
                        aux_roc_te = true;
                        break;
                }
        }
        if (!aux_roc_te) /* Not an Aux ROC time event */
                return -EINVAL;

        iwl_mvm_te_check_trigger(mvm, notif, te_data);

        IWL_DEBUG_TE(mvm,
                     "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
                     le32_to_cpu(notif->unique_id),
                     le32_to_cpu(notif->action), le32_to_cpu(notif->status));

        if (!le32_to_cpu(notif->status) ||
            le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
                /* End TE, notify mac80211 */
                ieee80211_remain_on_channel_expired(mvm->hw);
                iwl_mvm_roc_finished(mvm); /* flush aux queue */
                list_del(&te_data->list); /* remove from list */
                te_data->running = false;
                te_data->vif = NULL;
                te_data->uid = 0;
                te_data->id = TE_MAX;
        } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
                set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
                te_data->running = true;
                iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
                ieee80211_ready_on_channel(mvm->hw); /* Start TE */
        } else {
                IWL_DEBUG_TE(mvm,
                             "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
                             le32_to_cpu(notif->action));
                return -EINVAL;
        }

        return 0;
}

/*
 * The Rx handler for time event notifications
 */
void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
                                 struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_time_event_notif *notif = (void *)pkt->data;
        struct iwl_mvm_time_event_data *te_data, *tmp;

        IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
                     le32_to_cpu(notif->unique_id),
                     le32_to_cpu(notif->action));

        spin_lock_bh(&mvm->time_event_lock);

        /* This time event is triggered for Aux ROC request */
        if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
                goto unlock;

        list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
                if (le32_to_cpu(notif->unique_id) == te_data->uid)
                        iwl_mvm_te_handle_notif(mvm, te_data, notif);
        }
unlock:
        spin_unlock_bh(&mvm->time_event_lock);
}
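
/*
 * Notification-wait callback for TIME_EVENT_NOTIFICATION: returns true
 * (waking the waiter) once the notification matching the UID of the time
 * event we are waiting on has arrived.
 */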
static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
                             struct iwl_rx_packet *pkt, void *data)
{
        struct iwl_mvm *mvm =
                container_of(notif_wait, struct iwl_mvm, notif_wait);
        struct iwl_mvm_time_event_data *te_data = data;
        struct iwl_time_event_notif *resp;
        int resp_len = iwl_rx_packet_payload_len(pkt);

        if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
                return true;

        if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
                IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
                return true;
        }

        resp = (void *)pkt->data;

        /* te_data->uid is already set in the TIME_EVENT_CMD response */
        if (le32_to_cpu(resp->unique_id) != te_data->uid)
                return false;

        IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
                     te_data->uid);
        if (!resp->status)
                IWL_ERR(mvm,
                        "TIME_EVENT_NOTIFICATION received but not executed\n");

        return true;
}
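
/*
 * Notification-wait callback for the TIME_EVENT_CMD response: stores the
 * unique id the firmware assigned to the newly added time event in
 * te_data->uid.
 */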
static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
                                        struct iwl_rx_packet *pkt, void *data)
{
        struct iwl_mvm *mvm =
                container_of(notif_wait, struct iwl_mvm, notif_wait);
        struct iwl_mvm_time_event_data *te_data = data;
        struct iwl_time_event_resp *resp;
        int resp_len = iwl_rx_packet_payload_len(pkt);

        if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
                return true;

        if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
                IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
                return true;
        }

        resp = (void *)pkt->data;

        /* we should never get a response to another TIME_EVENT_CMD here */
        if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
                return false;

        te_data->uid = le32_to_cpu(resp->unique_id);
        IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
                     te_data->uid);
        return true;
}
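
/*
 * Add a new time event in the firmware: link te_data into the time event
 * list, send TIME_EVENT_CMD and pick up the firmware-assigned UID from the
 * command response via a notification wait. On failure the time event data
 * is cleared again.
 */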
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif,
                                       struct iwl_mvm_time_event_data *te_data,
                                       struct iwl_time_event_cmd *te_cmd)
{
        static const u16 time_event_response[] = { TIME_EVENT_CMD };
        struct iwl_notification_wait wait_time_event;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
                     le32_to_cpu(te_cmd->duration));

        spin_lock_bh(&mvm->time_event_lock);
        if (WARN_ON(te_data->id != TE_MAX)) {
                spin_unlock_bh(&mvm->time_event_lock);
                return -EIO;
        }
        te_data->vif = vif;
        te_data->duration = le32_to_cpu(te_cmd->duration);
        te_data->id = le32_to_cpu(te_cmd->id);
        list_add_tail(&te_data->list, &mvm->time_event_list);
        spin_unlock_bh(&mvm->time_event_lock);

        /*
         * Use a notification wait, which really just processes the
         * command response and doesn't wait for anything, in order
         * to be able to process the response and get the UID inside
         * the RX path. Using CMD_WANT_SKB doesn't work because it
         * stores the buffer and then wakes up this thread, by which
         * time another notification (that the time event started)
         * might already be processed unsuccessfully.
         */
        iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
                                   time_event_response,
                                   ARRAY_SIZE(time_event_response),
                                   iwl_mvm_time_event_response, te_data);

        ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
                                   sizeof(*te_cmd), te_cmd);
        if (ret) {
                IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
                iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
                goto out_clear_te;
        }

        /* No need to wait for anything, so just pass 1 (0 isn't valid) */
        ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
        /* should never fail */
        WARN_ON_ONCE(ret);

        if (ret) {
 out_clear_te:
                spin_lock_bh(&mvm->time_event_lock);
                iwl_mvm_te_clear_data(mvm, te_data);
                spin_unlock_bh(&mvm->time_event_lock);
        }
        return ret;
}
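
/*
 * Schedule (or extend) the session-protection time event of a station
 * interface. If a running event already covers at least @min_duration TU,
 * there is nothing to do; otherwise the old event is removed and a new one
 * of @duration TU (with a maximum start delay of @max_delay TU) is added.
 * When @wait_for_notif is set, also wait for the notification that the
 * event actually started in the firmware.
 *
 * Illustrative call (not taken from this file; the TU values are arbitrary
 * examples), assuming mvm->mutex is held:
 *
 *      iwl_mvm_protect_session(mvm, vif, 400, 200, 500, false);
 */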
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
                             struct ieee80211_vif *vif,
                             u32 duration, u32 min_duration,
                             u32 max_delay, bool wait_for_notif)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
        const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
        struct iwl_notification_wait wait_te_notif;
        struct iwl_time_event_cmd time_cmd = {};

        lockdep_assert_held(&mvm->mutex);

        if (te_data->running &&
            time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
                IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
                             jiffies_to_msecs(te_data->end_jiffies - jiffies));
                return;
        }

        if (te_data->running) {
                IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
                             te_data->uid,
                             jiffies_to_msecs(te_data->end_jiffies - jiffies));
                /*
                 * We don't have enough time left in the current TE, so
                 * cancel it and issue a new one.
                 * Of course it would be better to remove the old one only
                 * when the new one is added, but we don't care if we are off
                 * channel for a bit. All we need to do is not return before
                 * we actually begin to be on the channel.
                 */
                iwl_mvm_stop_session_protection(mvm, vif);
        }

        time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
        time_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
        time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

        time_cmd.apply_time = cpu_to_le32(0);

        time_cmd.max_frags = TE_V2_FRAG_NONE;
        time_cmd.max_delay = cpu_to_le32(max_delay);
        /* TODO: why do we need to set interval = bi if it is not periodic? */
        time_cmd.interval = cpu_to_le32(1);
        time_cmd.duration = cpu_to_le32(duration);
        time_cmd.repeat = 1;
        time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
                                      TE_V2_NOTIF_HOST_EVENT_END |
                                      TE_V2_START_IMMEDIATELY);

        if (!wait_for_notif) {
                iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
                return;
        }

        /*
         * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
         * right after we send the time event
         */
        iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
                                   te_notif_response,
                                   ARRAY_SIZE(te_notif_response),
                                   iwl_mvm_te_notif, te_data);

        /* If TE was sent OK - wait for the notification that started */
        if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
                IWL_ERR(mvm, "Failed to add TE to protect session\n");
                iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
        } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
                                         TU_TO_JIFFIES(max_delay))) {
                IWL_ERR(mvm, "Failed to protect session until TE\n");
        }
}
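
/*
 * Clear the time event data under the lock, saving its UID, and report via
 * the return value whether a removal command still needs to be sent to the
 * firmware (false if the event had already ended and been removed).
 */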
static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                                        struct iwl_mvm_time_event_data *te_data,
                                        u32 *uid)
{
        u32 id;

        /*
         * It is possible that by the time we got to this point the time
         * event was already removed.
         */
        spin_lock_bh(&mvm->time_event_lock);

        /* Save time event uid before clearing its data */
        *uid = te_data->uid;
        id = te_data->id;

        /*
         * The clear_data function handles time events that were already removed
         */
        iwl_mvm_te_clear_data(mvm, te_data);
        spin_unlock_bh(&mvm->time_event_lock);

        /*
         * It is possible that by the time we try to remove it, the time event
         * has already ended and been removed. In such a case there is no need
         * to send a removal command.
         */
        if (id == TE_MAX) {
                IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
                return false;
        }

        return true;
}

/*
 * Explicit request to remove an aux ROC time event. The removal of a time
 * event needs to be synchronized with the flow of a time event's end
 * notification, which also removes the time event from the op mode
 * data structures.
 */
static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
                                      struct iwl_mvm_vif *mvmvif,
                                      struct iwl_mvm_time_event_data *te_data)
{
        struct iwl_hs20_roc_req aux_cmd = {};
        u32 uid;
        int ret;

        if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
                return;

        aux_cmd.event_unique_id = cpu_to_le32(uid);
        aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
        aux_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
        IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
                     le32_to_cpu(aux_cmd.event_unique_id));
        ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
                                   sizeof(aux_cmd), &aux_cmd);

        if (WARN_ON(ret))
                return;
}

/*
 * Explicit request to remove a time event. The removal of a time event needs to
 * be synchronized with the flow of a time event's end notification, which also
 * removes the time event from the op mode data structures.
 */
void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                               struct iwl_mvm_vif *mvmvif,
                               struct iwl_mvm_time_event_data *te_data)
{
        struct iwl_time_event_cmd time_cmd = {};
        u32 uid;
        int ret;

        if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
                return;

        /* When we remove a TE, the UID is to be set in the id field */
        time_cmd.id = cpu_to_le32(uid);
        time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
        time_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

        IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
        ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
                                   sizeof(time_cmd), &time_cmd);
        if (WARN_ON(ret))
                return;
}
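
/*
 * Remove the session-protection time event of a station interface, but
 * only if the currently scheduled event really is the
 * TE_BSS_STA_AGGRESSIVE_ASSOC one used for session protection.
 */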
void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
                                     struct ieee80211_vif *vif)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
        u32 id;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->time_event_lock);
        id = te_data->id;
        spin_unlock_bh(&mvm->time_event_lock);

        if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
                IWL_DEBUG_TE(mvm,
                             "don't remove TE with id=%u (not session protection)\n",
                             id);
                return;
        }

        iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}
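
/*
 * Schedule a remain-on-channel time event for a P2P Device interface.
 * @duration is given in milliseconds and converted to TU for the firmware;
 * the time event type depends on the ROC @type requested by mac80211.
 * Returns -EBUSY if a ROC time event is already running on this interface.
 */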
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                          int duration, enum ieee80211_roc_type type)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
        struct iwl_time_event_cmd time_cmd = {};

        lockdep_assert_held(&mvm->mutex);
        if (te_data->running) {
                IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
                return -EBUSY;
        }

        time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
        time_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

        switch (type) {
        case IEEE80211_ROC_TYPE_NORMAL:
                time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
                break;
        case IEEE80211_ROC_TYPE_MGMT_TX:
                time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
                break;
        default:
                WARN_ONCE(1, "Got an invalid ROC type\n");
                return -EINVAL;
        }

        time_cmd.apply_time = cpu_to_le32(0);
        time_cmd.interval = cpu_to_le32(1);

        /*
         * The P2P Device TEs can have lower priority than other events
         * that are being scheduled by the driver/fw, and thus it might not be
         * scheduled. To improve the chances of it being scheduled, allow them
         * to be fragmented, and in addition allow them to be delayed.
         */
        time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
        time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
        time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
        time_cmd.repeat = 1;
        time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
                                      TE_V2_NOTIF_HOST_EVENT_END |
                                      TE_V2_START_IMMEDIATELY);

        return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
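
/*
 * Find the time event data of the current ROC: first look for a P2P_DEVICE
 * entry in the time event list, then fall back to the (at most one) entry
 * in the aux ROC list. Returns NULL if no ROC time event exists.
 */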
static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
{
        struct iwl_mvm_time_event_data *te_data;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->time_event_lock);

        /*
         * Iterate over the list of time events and find the time event that is
         * associated with a P2P_DEVICE interface.
         * This assumes that a P2P_DEVICE interface can have only a single time
         * event at any given time and this time event corresponds to a ROC
         * request
         */
        list_for_each_entry(te_data, &mvm->time_event_list, list) {
                if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
                        goto out;
        }

        /* There can only be at most one AUX ROC time event, we just use the
         * list to simplify/unify code. Remove it if it exists.
         */
        te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
                                           struct iwl_mvm_time_event_data,
                                           list);
out:
        spin_unlock_bh(&mvm->time_event_lock);
        return te_data;
}
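
/*
 * Clear a pending ROC time event on the driver side without sending a
 * removal command to the firmware.
 */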
void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
{
        struct iwl_mvm_time_event_data *te_data;
        u32 uid;

        te_data = iwl_mvm_get_roc_te(mvm);
        if (te_data)
                __iwl_mvm_remove_time_event(mvm, te_data, &uid);
}
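
/*
 * Cancel the currently running ROC: remove the corresponding time event in
 * the firmware (P2P Device or aux, as appropriate) and schedule the
 * roc_done_wk cleanup.
 */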
void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
{
        struct iwl_mvm_vif *mvmvif;
        struct iwl_mvm_time_event_data *te_data;

        te_data = iwl_mvm_get_roc_te(mvm);
        if (!te_data) {
                IWL_WARN(mvm, "No remain on channel event\n");
                return;
        }

        mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);

        if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
                set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
        } else {
                iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
        }

        iwl_mvm_roc_finished(mvm);
}
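
/*
 * Schedule the channel-switch-period time event that covers our absence
 * from the current channel during a CSA. If no @apply_time is given, the
 * event starts immediately. A running session-protection time event is
 * removed first, since hearing a beacon means it is no longer needed.
 */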
int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
                                struct ieee80211_vif *vif,
                                u32 duration, u32 apply_time)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
        struct iwl_time_event_cmd time_cmd = {};

        lockdep_assert_held(&mvm->mutex);

        if (te_data->running) {
                u32 id;

                spin_lock_bh(&mvm->time_event_lock);
                id = te_data->id;
                spin_unlock_bh(&mvm->time_event_lock);

                if (id == TE_CHANNEL_SWITCH_PERIOD) {
                        IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
                        return -EBUSY;
                }

                /*
                 * Remove the session protection time event to allow the
                 * channel switch. If we got here, we just heard a beacon so
                 * the session protection is not needed anymore anyway.
                 */
                iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
        }

        time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
        time_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
        time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
        time_cmd.apply_time = cpu_to_le32(apply_time);
        time_cmd.max_frags = TE_V2_FRAG_NONE;
        time_cmd.duration = cpu_to_le32(duration);
        time_cmd.repeat = 1;
        time_cmd.interval = cpu_to_le32(1);
        time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
                                      TE_V2_ABSENCE);
        if (!apply_time)
                time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);

        return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}