time-event.c

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/jiffies.h>
#include <net/mac80211.h>

#include "iwl-notif-wait.h"
#include "iwl-trans.h"
#include "fw-api.h"
#include "time-event.h"
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"

/*
 * For the high priority TE use a time event type that has similar priority to
 * the FW's action scan priority.
 */
#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
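
/*
 * Clear a time event's bookkeeping and unlink it from its list.
 * Entries that were already cleared (id == TE_MAX) are left untouched.
 * Must be called with mvm->time_event_lock held.
 */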
void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
                           struct iwl_mvm_time_event_data *te_data)
{
        lockdep_assert_held(&mvm->time_event_lock);

        if (te_data->id == TE_MAX)
                return;

        list_del(&te_data->list);
        te_data->running = false;
        te_data->uid = 0;
        te_data->id = TE_MAX;
        te_data->vif = NULL;
}
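
/*
 * Worker that runs once a remain-on-channel time event is over: it drops
 * pending offchannel frames and flushes the queues that were used for it.
 */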
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
        struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
        u32 queues = 0;

        /*
         * Clear the ROC_RUNNING / ROC_AUX_RUNNING status bits.
         * This will cause the TX path to drop offchannel transmissions.
         * That would also be done by mac80211, but it is racy, in particular
         * in the case that the time event actually completed in the firmware
         * (which is handled in iwl_mvm_te_handle_notif).
         */
        if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
                queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
        if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
                queues |= BIT(mvm->aux_queue);

        iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);

        synchronize_net();

        /*
         * Flush the offchannel queue -- this is called when the time
         * event finishes or is cancelled, so that frames queued for it
         * won't get stuck on the queue and be transmitted in the next
         * time event.
         * We have to send the command asynchronously since this cannot
         * be under the mutex for locking reasons, but that's not an
         * issue as it will have to complete before the next command is
         * executed, and a new time event means a new command.
         */
        iwl_mvm_flush_tx_path(mvm, queues, false);
}

static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
{
        /*
         * Of course, our status bit is just as racy as mac80211, so in
         * addition, fire off the work struct which will drop all frames
         * from the hardware queues that made it through the race. First
         * it will of course synchronize the TX path to make sure that
         * any *new* TX will be rejected.
         */
        schedule_work(&mvm->roc_done_wk);
}
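
/*
 * Called when the channel-switch NoA time event starts. If the CSA
 * countdown has already completed, finish the switch via mac80211;
 * otherwise the NoA started too early and nothing is done here.
 */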
static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
{
        struct ieee80211_vif *csa_vif;

        rcu_read_lock();

        csa_vif = rcu_dereference(mvm->csa_vif);
        if (!csa_vif || !csa_vif->csa_active)
                goto out_unlock;

        IWL_DEBUG_TE(mvm, "CSA NOA started\n");

        /*
         * CSA NoA is started but we still have beacons to
         * transmit on the current channel.
         * So we just do nothing here and the switch
         * will be performed on the last TBTT.
         */
        if (!ieee80211_csa_is_complete(csa_vif)) {
                IWL_WARN(mvm, "CSA NOA started too early\n");
                goto out_unlock;
        }

        ieee80211_csa_finish(csa_vif);

        rcu_read_unlock();

        RCU_INIT_POINTER(mvm->csa_vif, NULL);

        return;

out_unlock:
        rcu_read_unlock();
}
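
/*
 * Report a connection loss to mac80211 if the station interface is still
 * not associated (or has no DTIM period) when its time event is gone.
 * Returns true if a disconnection was triggered.
 */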
static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
                                        struct ieee80211_vif *vif,
                                        const char *errmsg)
{
        if (vif->type != NL80211_IFTYPE_STATION)
                return false;
        if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
                return false;
        if (errmsg)
                IWL_ERR(mvm, "%s\n", errmsg);

        ieee80211_connection_loss(vif);
        return true;
}

/*
 * Handles a FW notification for an event that is known to the driver.
 *
 * @mvm: the mvm component
 * @te_data: the time event data
 * @notif: the notification data corresponding to the time event data.
 */
static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
                                    struct iwl_mvm_time_event_data *te_data,
                                    struct iwl_time_event_notif *notif)
{
        lockdep_assert_held(&mvm->time_event_lock);

        IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
                     le32_to_cpu(notif->unique_id),
                     le32_to_cpu(notif->action));

        /*
         * The FW sends the start/end time event notifications even for events
         * that it fails to schedule. This is indicated in the status field of
         * the notification. This happens in cases that the scheduler cannot
         * find a schedule that can handle the event (for example requesting a
         * P2P Device discoverability, while there are other higher priority
         * events in the system).
         */
        if (!le32_to_cpu(notif->status)) {
                bool start = le32_to_cpu(notif->action) &
                                TE_V2_NOTIF_HOST_EVENT_START;
                IWL_WARN(mvm, "Time Event %s notification failure\n",
                         start ? "start" : "end");
                if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
                        iwl_mvm_te_clear_data(mvm, te_data);
                        return;
                }
        }

        if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
                IWL_DEBUG_TE(mvm,
                             "TE ended - current time %lu, estimated end %lu\n",
                             jiffies, te_data->end_jiffies);

                if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                        ieee80211_remain_on_channel_expired(mvm->hw);
                        iwl_mvm_roc_finished(mvm);
                }

                /*
                 * By now, we should have finished association
                 * and know the dtim period.
                 */
                iwl_mvm_te_check_disconnect(mvm, te_data->vif,
                        "No association and the time event is over already...");
                iwl_mvm_te_clear_data(mvm, te_data);
        } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
                te_data->running = true;
                te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);

                if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                        set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
                        iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
                        ieee80211_ready_on_channel(mvm->hw);
                } else if (te_data->vif->type == NL80211_IFTYPE_AP) {
                        if (le32_to_cpu(notif->status))
                                iwl_mvm_csa_noa_start(mvm);
                        else
                                IWL_DEBUG_TE(mvm, "CSA NOA failed to start\n");

                        /* we don't need it anymore */
                        iwl_mvm_te_clear_data(mvm, te_data);
                }
        } else {
                IWL_WARN(mvm, "Got TE with unknown action\n");
        }
}

/*
 * Handle an Aux ROC time event
 */
static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
                                           struct iwl_time_event_notif *notif)
{
        struct iwl_mvm_time_event_data *te_data, *tmp;
        bool aux_roc_te = false;

        list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
                if (le32_to_cpu(notif->unique_id) == te_data->uid) {
                        aux_roc_te = true;
                        break;
                }
        }
        if (!aux_roc_te) /* Not an Aux ROC time event */
                return -EINVAL;

        if (!le32_to_cpu(notif->status)) {
                IWL_DEBUG_TE(mvm,
                             "ERROR: Aux ROC Time Event %s notification failure\n",
                             (le32_to_cpu(notif->action) &
                              TE_V2_NOTIF_HOST_EVENT_START) ? "start" : "end");
                return -EINVAL;
        }

        IWL_DEBUG_TE(mvm,
                     "Aux ROC time event notification - UID = 0x%x action %d\n",
                     le32_to_cpu(notif->unique_id),
                     le32_to_cpu(notif->action));

        if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
                /* End TE, notify mac80211 */
                ieee80211_remain_on_channel_expired(mvm->hw);
                iwl_mvm_roc_finished(mvm); /* flush aux queue */
                list_del(&te_data->list); /* remove from list */
                te_data->running = false;
                te_data->vif = NULL;
                te_data->uid = 0;
                te_data->id = TE_MAX;
        } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
                set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
                te_data->running = true;
                ieee80211_ready_on_channel(mvm->hw); /* Start TE */
        } else {
                IWL_DEBUG_TE(mvm,
                             "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
                             le32_to_cpu(notif->action));
                return -EINVAL;
        }

        return 0;
}

/*
 * The Rx handler for time event notifications
 */
int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
                                struct iwl_rx_cmd_buffer *rxb,
                                struct iwl_device_cmd *cmd)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_time_event_notif *notif = (void *)pkt->data;
        struct iwl_mvm_time_event_data *te_data, *tmp;

        IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
                     le32_to_cpu(notif->unique_id),
                     le32_to_cpu(notif->action));

        spin_lock_bh(&mvm->time_event_lock);

        /* This time event is triggered for Aux ROC request */
        if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
                goto unlock;

        list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
                if (le32_to_cpu(notif->unique_id) == te_data->uid)
                        iwl_mvm_te_handle_notif(mvm, te_data, notif);
        }
unlock:
        spin_unlock_bh(&mvm->time_event_lock);

        return 0;
}
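
/*
 * Notification-wait handler for TIME_EVENT_NOTIFICATION, used when the
 * caller asked to wait until the time event actually starts in the FW.
 */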
static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
                             struct iwl_rx_packet *pkt, void *data)
{
        struct iwl_mvm *mvm =
                container_of(notif_wait, struct iwl_mvm, notif_wait);
        struct iwl_mvm_time_event_data *te_data = data;
        struct iwl_time_event_notif *resp;
        int resp_len = iwl_rx_packet_payload_len(pkt);

        if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
                return true;

        if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
                IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
                return true;
        }

        resp = (void *)pkt->data;

        /* te_data->uid is already set in the TIME_EVENT_CMD response */
        if (le32_to_cpu(resp->unique_id) != te_data->uid)
                return false;

        IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
                     te_data->uid);
        if (!resp->status)
                IWL_ERR(mvm,
                        "TIME_EVENT_NOTIFICATION received but not executed\n");

        return true;
}
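
/*
 * Notification-wait handler for the TIME_EVENT_CMD response; it captures
 * the unique ID that the FW assigned to the newly added time event.
 */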
static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
                                        struct iwl_rx_packet *pkt, void *data)
{
        struct iwl_mvm *mvm =
                container_of(notif_wait, struct iwl_mvm, notif_wait);
        struct iwl_mvm_time_event_data *te_data = data;
        struct iwl_time_event_resp *resp;
        int resp_len = iwl_rx_packet_payload_len(pkt);

        if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
                return true;

        if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
                IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
                return true;
        }

        resp = (void *)pkt->data;

        /* we should never get a response to another TIME_EVENT_CMD here */
        if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
                return false;

        te_data->uid = le32_to_cpu(resp->unique_id);
        IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
                     te_data->uid);
        return true;
}
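
/*
 * Add a time event to the FW and to the driver's bookkeeping, using a
 * notification wait on the TIME_EVENT_CMD response to grab the UID in
 * the RX path (see the comment below for why CMD_WANT_SKB is not used).
 */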
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif,
                                       struct iwl_mvm_time_event_data *te_data,
                                       struct iwl_time_event_cmd *te_cmd)
{
        static const u8 time_event_response[] = { TIME_EVENT_CMD };
        struct iwl_notification_wait wait_time_event;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
                     le32_to_cpu(te_cmd->duration));

        spin_lock_bh(&mvm->time_event_lock);
        if (WARN_ON(te_data->id != TE_MAX)) {
                spin_unlock_bh(&mvm->time_event_lock);
                return -EIO;
        }
        te_data->vif = vif;
        te_data->duration = le32_to_cpu(te_cmd->duration);
        te_data->id = le32_to_cpu(te_cmd->id);
        list_add_tail(&te_data->list, &mvm->time_event_list);
        spin_unlock_bh(&mvm->time_event_lock);

        /*
         * Use a notification wait, which really just processes the
         * command response and doesn't wait for anything, in order
         * to be able to process the response and get the UID inside
         * the RX path. Using CMD_WANT_SKB doesn't work because it
         * stores the buffer and then wakes up this thread, by which
         * time another notification (that the time event started)
         * might already be processed unsuccessfully.
         */
        iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
                                   time_event_response,
                                   ARRAY_SIZE(time_event_response),
                                   iwl_mvm_time_event_response, te_data);

        ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
                                   sizeof(*te_cmd), te_cmd);
        if (ret) {
                IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
                iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
                goto out_clear_te;
        }

        /* No need to wait for anything, so just pass 1 (0 isn't valid) */
        ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
        /* should never fail */
        WARN_ON_ONCE(ret);

        if (ret) {
out_clear_te:
                spin_lock_bh(&mvm->time_event_lock);
                iwl_mvm_te_clear_data(mvm, te_data);
                spin_unlock_bh(&mvm->time_event_lock);
        }
        return ret;
}
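
/*
 * Schedule (or extend) a session-protection time event for association.
 * If the current time event already covers min_duration nothing is done;
 * otherwise the old event is removed and a new one is added, optionally
 * waiting for the FW notification that it actually started.
 */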
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
                             struct ieee80211_vif *vif,
                             u32 duration, u32 min_duration,
                             u32 max_delay, bool wait_for_notif)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
        const u8 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
        struct iwl_notification_wait wait_te_notif;
        struct iwl_time_event_cmd time_cmd = {};

        lockdep_assert_held(&mvm->mutex);

        if (te_data->running &&
            time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
                IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
                             jiffies_to_msecs(te_data->end_jiffies - jiffies));
                return;
        }

        if (te_data->running) {
                IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
                             te_data->uid,
                             jiffies_to_msecs(te_data->end_jiffies - jiffies));
                /*
                 * we don't have enough time
                 * cancel the current TE and issue a new one
                 * Of course it would be better to remove the old one only
                 * when the new one is added, but we don't care if we are off
                 * channel for a bit. All we need to do, is not to return
                 * before we actually begin to be on the channel.
                 */
                iwl_mvm_stop_session_protection(mvm, vif);
        }

        time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
        time_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
        time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

        time_cmd.apply_time =
                cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));

        time_cmd.max_frags = TE_V2_FRAG_NONE;
        time_cmd.max_delay = cpu_to_le32(max_delay);
        /* TODO: why do we need to interval = bi if it is not periodic? */
        time_cmd.interval = cpu_to_le32(1);
        time_cmd.duration = cpu_to_le32(duration);
        time_cmd.repeat = 1;
        time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
                                      TE_V2_NOTIF_HOST_EVENT_END |
                                      T2_V2_START_IMMEDIATELY);

        if (!wait_for_notif) {
                iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
                return;
        }

        /*
         * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
         * right after we send the time event
         */
        iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
                                   te_notif_response,
                                   ARRAY_SIZE(te_notif_response),
                                   iwl_mvm_te_notif, te_data);

        /* If TE was sent OK - wait for the notification that started */
        if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
                IWL_ERR(mvm, "Failed to add TE to protect session\n");
                iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
        } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
                                         TU_TO_JIFFIES(max_delay))) {
                IWL_ERR(mvm, "Failed to protect session until TE\n");
        }
}

/*
 * Explicit request to remove a time event. The removal of a time event needs to
 * be synchronized with the flow of a time event's end notification, which also
 * removes the time event from the op mode data structures.
 */
void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                               struct iwl_mvm_vif *mvmvif,
                               struct iwl_mvm_time_event_data *te_data)
{
        struct iwl_time_event_cmd time_cmd = {};
        u32 id, uid;
        int ret;

        /*
         * It is possible that by the time we got to this point the time
         * event was already removed.
         */
        spin_lock_bh(&mvm->time_event_lock);

        /* Save time event uid before clearing its data */
        uid = te_data->uid;
        id = te_data->id;

        /*
         * The clear_data function handles time events that were already removed
         */
        iwl_mvm_te_clear_data(mvm, te_data);
        spin_unlock_bh(&mvm->time_event_lock);

        /*
         * It is possible that by the time we try to remove it, the time event
         * has already ended and been removed. In such a case there is no need
         * to send a removal command.
         */
        if (id == TE_MAX) {
                IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", uid);
                return;
        }

        /* When we remove a TE, the UID is to be set in the id field */
        time_cmd.id = cpu_to_le32(uid);
        time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
        time_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

        IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
        ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
                                   sizeof(time_cmd), &time_cmd);
        if (WARN_ON(ret))
                return;
}
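
/*
 * Cancel the session-protection time event of the given interface.
 */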
void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
                                     struct ieee80211_vif *vif)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;

        lockdep_assert_held(&mvm->mutex);
        iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}
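
/*
 * Schedule a time event for a P2P_DEVICE remain-on-channel request.
 * Returns -EBUSY if a ROC time event is already running on this vif.
 */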
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                          int duration, enum ieee80211_roc_type type)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
        struct iwl_time_event_cmd time_cmd = {};

        lockdep_assert_held(&mvm->mutex);

        if (te_data->running) {
                IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
                return -EBUSY;
        }

        /*
         * Flush the done work, just in case it's still pending, so that
         * the work it does can complete and we can accept new frames.
         */
        flush_work(&mvm->roc_done_wk);

        time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
        time_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

        switch (type) {
        case IEEE80211_ROC_TYPE_NORMAL:
                time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
                break;
        case IEEE80211_ROC_TYPE_MGMT_TX:
                time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
                break;
        default:
                WARN_ONCE(1, "Got an invalid ROC type\n");
                return -EINVAL;
        }

        time_cmd.apply_time = cpu_to_le32(0);
        time_cmd.interval = cpu_to_le32(1);

        /*
         * The P2P Device TEs can have lower priority than other events
         * that are being scheduled by the driver/fw, and thus they might not
         * be scheduled. To improve the chances of them being scheduled, allow
         * them to be fragmented, and in addition allow them to be delayed.
         */
        time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
        time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
        time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
        time_cmd.repeat = 1;
        time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
                                      TE_V2_NOTIF_HOST_EVENT_END |
                                      T2_V2_START_IMMEDIATELY);

        return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
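
/*
 * Cancel the time event that backs the current P2P_DEVICE
 * remain-on-channel request, if any, and schedule the cleanup work.
 */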
void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm)
{
        struct iwl_mvm_vif *mvmvif;
        struct iwl_mvm_time_event_data *te_data;

        lockdep_assert_held(&mvm->mutex);

        /*
         * Iterate over the list of time events and find the time event that is
         * associated with a P2P_DEVICE interface.
         * This assumes that a P2P_DEVICE interface can have only a single time
         * event at any given time and this time event corresponds to a ROC
         * request.
         */
        mvmvif = NULL;
        spin_lock_bh(&mvm->time_event_lock);
        list_for_each_entry(te_data, &mvm->time_event_list, list) {
                if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                        mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
                        break;
                }
        }
        spin_unlock_bh(&mvm->time_event_lock);

        if (!mvmvif) {
                IWL_WARN(mvm, "P2P_DEVICE no remain on channel event\n");
                return;
        }

        iwl_mvm_remove_time_event(mvm, mvmvif, te_data);

        iwl_mvm_roc_finished(mvm);
}
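
/*
 * Schedule the channel-switch period time event, applied at apply_time
 * and marked with the TE_V2_ABSENCE policy.
 */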
int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
                                struct ieee80211_vif *vif,
                                u32 duration, u32 apply_time)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
        struct iwl_time_event_cmd time_cmd = {};

        lockdep_assert_held(&mvm->mutex);

        if (te_data->running) {
                IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
                return -EBUSY;
        }

        time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
        time_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
        time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
        time_cmd.apply_time = cpu_to_le32(apply_time);
        time_cmd.max_frags = TE_V2_FRAG_NONE;
        time_cmd.duration = cpu_to_le32(duration);
        time_cmd.repeat = 1;
        time_cmd.interval = cpu_to_le32(1);
        time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
                                      TE_V2_ABSENCE);

        return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}