/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include "mvm.h"
#include "time-event.h"
#include "iwl-io.h"
#include "iwl-prph.h"

#define TU_TO_US(x) (x * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)

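/*
 * Ask mac80211 to tear down every TDLS peer currently known to the driver,
 * by requesting an NL80211_TDLS_TEARDOWN operation for each TDLS station.
 */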
void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        int i;

        lockdep_assert_held(&mvm->mutex);

        for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
                                                lockdep_is_held(&mvm->mutex));
                if (!sta || IS_ERR(sta) || !sta->tdls)
                        continue;

                mvmsta = iwl_mvm_sta_from_mac80211(sta);
                ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
                                            NL80211_TDLS_TEARDOWN,
                                            WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
                                            GFP_KERNEL);
        }
}

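/*
 * Count the TDLS stations in the firmware's station table. If @vif is
 * non-NULL, only peers belonging to that interface are counted.
 */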
int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        int count = 0;
        int i;

        lockdep_assert_held(&mvm->mutex);

        for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
                                                lockdep_is_held(&mvm->mutex));
                if (!sta || IS_ERR(sta) || !sta->tdls)
                        continue;

                if (vif) {
                        mvmsta = iwl_mvm_sta_from_mac80211(sta);
                        if (mvmsta->vif != vif)
                                continue;
                }

                count++;
        }

        return count;
}

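/*
 * Send the TDLS_CONFIG_CMD to the firmware with the current list of TDLS
 * peers for this interface. The response is only length-checked and then
 * discarded.
 */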
static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
        struct iwl_rx_packet *pkt;
        struct iwl_tdls_config_res *resp;
        struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
        struct iwl_host_cmd cmd = {
                .id = TDLS_CONFIG_CMD,
                .flags = CMD_WANT_SKB,
                .data = { &tdls_cfg_cmd, },
                .len = { sizeof(struct iwl_tdls_config_cmd), },
        };
        struct ieee80211_sta *sta;
        int ret, i, cnt;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

        lockdep_assert_held(&mvm->mutex);

        tdls_cfg_cmd.id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
        tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
        tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */

        /* for now the Tx cmd is empty and unused */

        /* populate TDLS peer data */
        cnt = 0;
        for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
                                                lockdep_is_held(&mvm->mutex));
                if (IS_ERR_OR_NULL(sta) || !sta->tdls)
                        continue;

                tdls_cfg_cmd.sta_info[cnt].sta_id = i;
                tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid = IWL_MVM_TDLS_FW_TID;
                tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
                tdls_cfg_cmd.sta_info[cnt].is_initiator =
                        cpu_to_le32(sta->tdls_initiator ? 1 : 0);

                cnt++;
        }

        tdls_cfg_cmd.tdls_peer_count = cnt;
        IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);

        ret = iwl_mvm_send_cmd(mvm, &cmd);
        if (WARN_ON_ONCE(ret))
                return;

        pkt = cmd.resp_pkt;

        WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));

        /* we don't really care about the response at this point */

        iwl_free_resp(&cmd);
}

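/*
 * Re-send the TDLS configuration to the firmware whenever a TDLS peer is
 * added or removed, updating the MAC power state around the first/last peer.
 */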
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                               bool sta_added)
{
        int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);

        /* when the first peer joins, send a power update first */
        if (tdls_sta_cnt == 1 && sta_added)
                iwl_mvm_power_update_mac(mvm);

        /* configure the FW with TDLS peer info */
        iwl_mvm_tdls_config(mvm, vif);

        /* when the last peer leaves, send a power update last */
        if (tdls_sta_cnt == 0 && !sta_added)
                iwl_mvm_power_update_mac(mvm);
}

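/*
 * Protect the session for two DTIM intervals around a TDLS discovery, so the
 * station stays on the base channel long enough to hear the setup response.
 */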
void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
                                           struct ieee80211_vif *vif)
{
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;

        /*
         * iwl_mvm_protect_session() reads directly from the device
         * (the system time), so make sure it is available.
         */
        if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS))
                return;

        mutex_lock(&mvm->mutex);
        /* Protect the session to hear the TDLS setup response on the channel */
        iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
        mutex_unlock(&mvm->mutex);

        iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
}

static const char *
iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
{
        switch (state) {
        case IWL_MVM_TDLS_SW_IDLE:
                return "IDLE";
        case IWL_MVM_TDLS_SW_REQ_SENT:
                return "REQ SENT";
        case IWL_MVM_TDLS_SW_RESP_RCVD:
                return "RESP RECEIVED";
        case IWL_MVM_TDLS_SW_REQ_RCVD:
                return "REQ RECEIVED";
        case IWL_MVM_TDLS_SW_ACTIVE:
                return "ACTIVE";
        }

        return NULL;
}

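/*
 * Move the TDLS channel-switch state machine to @state, logging the
 * transition. Entering REQ_SENT records the request timestamp from the
 * device; returning to IDLE invalidates the current switching peer.
 */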
static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
                                         enum iwl_mvm_tdls_cs_state state)
{
        if (mvm->tdls_cs.state == state)
                return;

        IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
                       iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
                       iwl_mvm_tdls_cs_state_str(state));
        mvm->tdls_cs.state = state;

        /* we only send requests to our switching peer - update sent time */
        if (state == IWL_MVM_TDLS_SW_REQ_SENT)
                mvm->tdls_cs.peer.sent_timestamp =
                        iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);

        if (state == IWL_MVM_TDLS_SW_IDLE)
                mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
}

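/*
 * Handle the firmware's TDLS channel-switch notification. A zero status
 * means the switch failed and we return to IDLE; otherwise mark the switch
 * ACTIVE and re-arm the delayed work for the next DTIM.
 */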
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
        struct ieee80211_sta *sta;
        unsigned int delay;
        struct iwl_mvm_sta *mvmsta;
        struct ieee80211_vif *vif;
        u32 sta_id = le32_to_cpu(notif->sta_id);

        lockdep_assert_held(&mvm->mutex);

        /* can fail sometimes */
        if (!le32_to_cpu(notif->status)) {
                iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
                return;
        }

        if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
                return;

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));
        /* the station may not be here, but if it is, it must be a TDLS peer */
        if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
                return;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        vif = mvmsta->vif;

        /*
         * Update state and possibly switch again after this is over (DTIM).
         * Also convert TU to msec.
         */
        delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
        mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
                         msecs_to_jiffies(delay));

        iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
}

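/*
 * Validate a requested channel-switch action against the current state of
 * the switch state machine and the identity of the switching peer. Returns
 * 0 if the action is allowed, -EBUSY or -EINVAL otherwise.
 */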
static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
                          enum iwl_tdls_channel_switch_type type,
                          const u8 *peer, bool peer_initiator, u32 timestamp)
{
        bool same_peer = false;
        int ret = 0;

        /* get the existing peer if it's there */
        if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
            mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
                struct ieee80211_sta *sta = rcu_dereference_protected(
                                mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
                                lockdep_is_held(&mvm->mutex));
                if (!IS_ERR_OR_NULL(sta))
                        same_peer = ether_addr_equal(peer, sta->addr);
        }

        switch (mvm->tdls_cs.state) {
        case IWL_MVM_TDLS_SW_IDLE:
                /*
                 * might be spurious packet from the peer after the switch is
                 * already done
                 */
                if (type == TDLS_MOVE_CH)
                        ret = -EINVAL;
                break;
        case IWL_MVM_TDLS_SW_REQ_SENT:
                /* only allow requests from the same peer */
                if (!same_peer)
                        ret = -EBUSY;
                else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
                         !peer_initiator)
                        /*
                         * We received a ch-switch request while an outgoing
                         * one is pending. Allow it if the peer is the link
                         * initiator.
                         */
                        ret = -EBUSY;
                else if (type == TDLS_SEND_CHAN_SW_REQ)
                        /* wait for idle before sending another request */
                        ret = -EBUSY;
                else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
                        /* we got a stale response - ignore it */
                        ret = -EINVAL;
                break;
        case IWL_MVM_TDLS_SW_RESP_RCVD:
                /*
                 * we are waiting for the FW to give an "active" notification,
                 * so ignore requests in the meantime
                 */
                ret = -EBUSY;
                break;
        case IWL_MVM_TDLS_SW_REQ_RCVD:
                /* as above, allow the link initiator to proceed */
                if (type == TDLS_SEND_CHAN_SW_REQ) {
                        if (!same_peer)
                                ret = -EBUSY;
                        else if (peer_initiator) /* they are the initiator */
                                ret = -EBUSY;
                } else if (type == TDLS_MOVE_CH) {
                        ret = -EINVAL;
                }
                break;
        case IWL_MVM_TDLS_SW_ACTIVE:
                /*
                 * the only valid request when active is a request to return
                 * to the base channel by the current off-channel peer
                 */
                if (type != TDLS_MOVE_CH || !same_peer)
                        ret = -EBUSY;
                break;
        }

        if (ret)
                IWL_DEBUG_TDLS(mvm,
                               "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
                               type, mvm->tdls_cs.state, peer, same_peer,
                               peer_initiator);

        return ret;
}

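/*
 * Build and send the TDLS_CHANNEL_SWITCH_CMD for the given peer: resolve the
 * target (or base) channel, fill in the switch timing and the template frame
 * the firmware will transmit, and advance the state machine on success. Any
 * failure drops the state machine back to IDLE.
 */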
static int
iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
                                   struct ieee80211_vif *vif,
                                   enum iwl_tdls_channel_switch_type type,
                                   const u8 *peer, bool peer_initiator,
                                   u8 oper_class,
                                   struct cfg80211_chan_def *chandef,
                                   u32 timestamp, u16 switch_time,
                                   u16 switch_timeout, struct sk_buff *skb,
                                   u32 ch_sw_tm_ie)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        struct ieee80211_tx_info *info;
        struct ieee80211_hdr *hdr;
        struct iwl_tdls_channel_switch_cmd cmd = {0};
        int ret;

        lockdep_assert_held(&mvm->mutex);

        ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
                                        timestamp);
        if (ret)
                return ret;

        if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
                ret = -EINVAL;
                goto out;
        }

        cmd.switch_type = type;
        cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
        cmd.timing.switch_time = cpu_to_le32(switch_time);
        cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);

        rcu_read_lock();
        sta = ieee80211_find_sta(vif, peer);
        if (!sta) {
                rcu_read_unlock();
                ret = -ENOENT;
                goto out;
        }
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);

        if (!chandef) {
                if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
                    mvm->tdls_cs.peer.chandef.chan) {
                        /* actually moving to the channel */
                        chandef = &mvm->tdls_cs.peer.chandef;
                } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
                           type == TDLS_MOVE_CH) {
                        /* we need to return to base channel */
                        struct ieee80211_chanctx_conf *chanctx =
                                        rcu_dereference(vif->chanctx_conf);

                        if (WARN_ON_ONCE(!chanctx)) {
                                rcu_read_unlock();
                                goto out;
                        }

                        chandef = &chanctx->def;
                }
        }

        if (chandef) {
                cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
                               PHY_BAND_24 : PHY_BAND_5);
                cmd.ci.channel = chandef->chan->hw_value;
                cmd.ci.width = iwl_mvm_get_channel_width(chandef);
                cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
        }

        /* keep quota calculation simple for now - 50% of DTIM for TDLS */
        cmd.timing.max_offchan_duration =
                        cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
                                             vif->bss_conf.beacon_int) / 2);

        /* Switch time is the first element in the switch-timing IE. */
        cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);

        info = IEEE80211_SKB_CB(skb);
        hdr = (void *)skb->data;
        if (info->control.hw_key) {
                if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
                        rcu_read_unlock();
                        ret = -EINVAL;
                        goto out;
                }
                iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
        }

        iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
                           mvmsta->sta_id);

        iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
                                hdr->frame_control);
        rcu_read_unlock();

        memcpy(cmd.frame.data, skb->data, skb->len);

        ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
                                   sizeof(cmd), &cmd);
        if (ret) {
                IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
                        ret);
                goto out;
        }

        /* channel switch has started, update state */
        if (type != TDLS_MOVE_CH) {
                mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
                iwl_mvm_tdls_update_cs_state(mvm,
                                             type == TDLS_SEND_CHAN_SW_REQ ?
                                             IWL_MVM_TDLS_SW_REQ_SENT :
                                             IWL_MVM_TDLS_SW_REQ_RCVD);
        } else {
                iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
        }

out:

        /* channel switch failed - we are idle */
        if (ret)
                iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

        return ret;
}

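/*
 * Delayed work that runs when an active channel switch has finished or timed
 * out: go back to IDLE and, if a switching peer is still configured, send a
 * new switch request and re-arm the work for another DTIM.
 */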
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
{
        struct iwl_mvm *mvm;
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        struct ieee80211_vif *vif;
        unsigned int delay;
        int ret;

        mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
        mutex_lock(&mvm->mutex);

        /* called after an active channel switch has finished or timed-out */
        iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

        /* station might be gone, in that case do nothing */
        if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
                goto out;

        sta = rcu_dereference_protected(
                                mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
                                lockdep_is_held(&mvm->mutex));
        /* the station may not be here, but if it is, it must be a TDLS peer */
        if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
                goto out;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        vif = mvmsta->vif;
        ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
                                                 TDLS_SEND_CHAN_SW_REQ,
                                                 sta->addr,
                                                 mvm->tdls_cs.peer.initiator,
                                                 mvm->tdls_cs.peer.op_class,
                                                 &mvm->tdls_cs.peer.chandef,
                                                 0, 0, 0,
                                                 mvm->tdls_cs.peer.skb,
                                                 mvm->tdls_cs.peer.ch_sw_tm_ie);
        if (ret)
                IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);

        /* retry after a DTIM if we failed sending now */
        delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
        schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
out:
        mutex_unlock(&mvm->mutex);
}

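/*
 * Start a TDLS channel switch with @sta. Only one switching peer is supported
 * at a time; on success the peer, channel and template frame are saved so the
 * delayed work can repeat the switch until it is canceled.
 */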
int
iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta, u8 oper_class,
                            struct cfg80211_chan_def *chandef,
                            struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
{
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_mvm_sta *mvmsta;
        unsigned int delay;
        int ret;

        mutex_lock(&mvm->mutex);

        IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
                       sta->addr, chandef->chan->center_freq, chandef->width);

        /* we only support a single peer for channel switching */
        if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
                IWL_DEBUG_TDLS(mvm,
                               "Existing peer. Can't start switch with %pM\n",
                               sta->addr);
                ret = -EBUSY;
                goto out;
        }

        ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
                                                 TDLS_SEND_CHAN_SW_REQ,
                                                 sta->addr, sta->tdls_initiator,
                                                 oper_class, chandef, 0, 0, 0,
                                                 tmpl_skb, ch_sw_tm_ie);
        if (ret)
                goto out;

        /*
         * Mark the peer as "in tdls switch" for this vif. We only allow a
         * single such peer per vif.
         */
        mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
        if (!mvm->tdls_cs.peer.skb) {
                ret = -ENOMEM;
                goto out;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
        mvm->tdls_cs.peer.chandef = *chandef;
        mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
        mvm->tdls_cs.peer.op_class = oper_class;
        mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;

        /*
         * Wait for 2 DTIM periods before attempting the next switch. The next
         * switch will be made sooner if the current one completes before that.
         */
        delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
                             vif->bss_conf.beacon_int);
        mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
                         msecs_to_jiffies(delay));

out:
        mutex_unlock(&mvm->mutex);
        return ret;
}

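/*
 * Stop channel switching with @sta. If a switch with this peer is currently
 * in progress, wait a DTIM interval for the phy to return to the base channel
 * before flushing the channel-switch work.
 */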
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
                                        struct ieee80211_vif *vif,
                                        struct ieee80211_sta *sta)
{
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct ieee80211_sta *cur_sta;
        bool wait_for_phy = false;

        mutex_lock(&mvm->mutex);

        IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);

        /* we only support a single peer for channel switching */
        if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
                IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
                goto out;
        }

        cur_sta = rcu_dereference_protected(
                                mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
                                lockdep_is_held(&mvm->mutex));
        /* make sure it's the same peer */
        if (cur_sta != sta)
                goto out;

        /*
         * If we're currently in a switch because of the now canceled peer,
         * wait a DTIM here to make sure the phy is back on the base channel.
         * We can't otherwise force it.
         */
        if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
            mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
                wait_for_phy = true;

        mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
        dev_kfree_skb(mvm->tdls_cs.peer.skb);
        mvm->tdls_cs.peer.skb = NULL;

out:
        mutex_unlock(&mvm->mutex);

        /* make sure the phy is on the base channel */
        if (wait_for_phy)
                msleep(TU_TO_MS(vif->bss_conf.dtim_period *
                                vif->bss_conf.beacon_int));

        /* flush the channel switch state */
        flush_delayed_work(&mvm->tdls_cs.dwork);

        IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
}

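/*
 * Handle a channel-switch request/response frame received from a TDLS peer.
 * A failed response from the peer we are switching to resets the state
 * machine; otherwise the corresponding switch command is sent to the
 * firmware. A timeout is always (re)armed in case the switch does not
 * complete.
 */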
void
iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_tdls_ch_sw_params *params)
{
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        enum iwl_tdls_channel_switch_type type;
        unsigned int delay;
        const char *action_str =
                params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
                "REQ" : "RESP";

        mutex_lock(&mvm->mutex);

        IWL_DEBUG_TDLS(mvm,
                       "Received TDLS ch switch action %s from %pM status %d\n",
                       action_str, params->sta->addr, params->status);

        /*
         * we got a non-zero status from a peer we were switching to - move to
         * the idle state and retry again later
         */
        if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
            params->status != 0 &&
            mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
            mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
                struct ieee80211_sta *cur_sta;

                /* make sure it's the same peer */
                cur_sta = rcu_dereference_protected(
                                mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
                                lockdep_is_held(&mvm->mutex));
                if (cur_sta == params->sta) {
                        iwl_mvm_tdls_update_cs_state(mvm,
                                                     IWL_MVM_TDLS_SW_IDLE);
                        goto retry;
                }
        }

        type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
               TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;

        iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
                                           params->sta->tdls_initiator, 0,
                                           params->chandef, params->timestamp,
                                           params->switch_time,
                                           params->switch_timeout,
                                           params->tmpl_skb,
                                           params->ch_sw_tm_ie);

retry:
        /* register a timeout in case we don't succeed in switching */
        delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
                1024 / 1000;
        mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
                         msecs_to_jiffies(delay));
        mutex_unlock(&mvm->mutex);
}