tdls.c

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include "mvm.h"
#include "time-event.h"
#include "iwl-io.h"
#include "iwl-prph.h"

#define TU_TO_US(x) (x * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)
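
/*
 * Ask mac80211 to tear down every TDLS link we currently track a station
 * entry for. Called with mvm->mutex held.
 */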
void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
					    NL80211_TDLS_TEARDOWN,
					    WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
					    GFP_KERNEL);
	}
}
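
/*
 * Count the TDLS stations currently known to the driver, optionally
 * restricted to the given vif. Called with mvm->mutex held.
 */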
int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int count = 0;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		if (vif) {
			mvmsta = iwl_mvm_sta_from_mac80211(sta);
			if (mvmsta->vif != vif)
				continue;
		}

		count++;
	}

	return count;
}
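
/*
 * Send TDLS_CONFIG_CMD to the firmware with the current list of TDLS
 * peers. The response is only sanity-checked; its contents are not used.
 */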
static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_rx_packet *pkt;
	struct iwl_tdls_config_res *resp;
	struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
	struct iwl_host_cmd cmd = {
		.id = TDLS_CONFIG_CMD,
		.flags = CMD_WANT_SKB,
		.data = { &tdls_cfg_cmd, },
		.len = { sizeof(struct iwl_tdls_config_cmd), },
	};
	struct ieee80211_sta *sta;
	int ret, i, cnt;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	tdls_cfg_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
	tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */

	/* for now the Tx cmd is empty and unused */

	/* populate TDLS peer data */
	cnt = 0;
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
			continue;

		tdls_cfg_cmd.sta_info[cnt].sta_id = i;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid = IWL_MVM_TDLS_FW_TID;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
		tdls_cfg_cmd.sta_info[cnt].is_initiator =
				cpu_to_le32(sta->tdls_initiator ? 1 : 0);

		cnt++;
	}

	tdls_cfg_cmd.tdls_peer_count = cnt;
	IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (WARN_ON_ONCE(ret))
		return;

	pkt = cmd.resp_pkt;
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n",
			pkt->hdr.flags);
		goto exit;
	}

	if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)))
		goto exit;

	/* we don't really care about the response at this point */

exit:
	iwl_free_resp(&cmd);
}
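
/*
 * Recalculate TDLS state after a peer was added or removed: push the new
 * peer list to the firmware, and refresh MAC power settings before the
 * first peer joins or after the last one leaves.
 */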
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool sta_added)
{
	int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);

	/* when the first peer joins, send a power update first */
	if (tdls_sta_cnt == 1 && sta_added)
		iwl_mvm_power_update_mac(mvm);

	/* configure the FW with TDLS peer info */
	iwl_mvm_tdls_config(mvm, vif);

	/* when the last peer leaves, send a power update last */
	if (tdls_sta_cnt == 0 && !sta_added)
		iwl_mvm_power_update_mac(mvm);
}
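
/*
 * Entry point called by mac80211 when TDLS discovery starts: protect the
 * session for two DTIM intervals so we stay on the operating channel long
 * enough to hear the TDLS setup/discovery response.
 */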
void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;

	/*
	 * iwl_mvm_protect_session() reads directly from the device
	 * (the system time), so make sure it is available.
	 */
	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS))
		return;

	mutex_lock(&mvm->mutex);
	/* Protect the session to hear the TDLS setup response on the channel */
	iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
}
static const char *
iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
{
	switch (state) {
	case IWL_MVM_TDLS_SW_IDLE:
		return "IDLE";
	case IWL_MVM_TDLS_SW_REQ_SENT:
		return "REQ SENT";
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		return "RESP RECEIVED";
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		return "REQ RECEIVED";
	case IWL_MVM_TDLS_SW_ACTIVE:
		return "ACTIVE";
	}

	return NULL;
}
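
/*
 * Move the channel-switch state machine to a new state. Record the
 * request timestamp when we become the requester, and forget the current
 * switching station when returning to IDLE.
 */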
static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
					 enum iwl_mvm_tdls_cs_state state)
{
	if (mvm->tdls_cs.state == state)
		return;

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
		       iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
		       iwl_mvm_tdls_cs_state_str(state));
	mvm->tdls_cs.state = state;

	/* we only send requests to our switching peer - update sent time */
	if (state == IWL_MVM_TDLS_SW_REQ_SENT)
		mvm->tdls_cs.peer.sent_timestamp =
			iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);

	if (state == IWL_MVM_TDLS_SW_IDLE)
		mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
}
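
/*
 * Handle the firmware's TDLS channel-switch notification. On failure we
 * drop back to IDLE; on success we mark the switch ACTIVE and re-arm the
 * delayed work to switch again after roughly one DTIM interval.
 */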
int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			  struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	unsigned int delay;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	lockdep_assert_held(&mvm->mutex);

	/* can fail sometimes */
	if (!le32_to_cpu(notif->status)) {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
		goto out;
	}

	if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
		goto out;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;

	/*
	 * Update state and possibly switch again after this is over (DTIM).
	 * Also convert TU to msec.
	 */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);

out:
	return 0;
}
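
/*
 * Validate a requested channel-switch action against the current state
 * machine and the identity of the peer. Returns 0 if the action is
 * allowed, -EBUSY or -EINVAL otherwise.
 */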
static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
			  enum iwl_tdls_channel_switch_type type,
			  const u8 *peer, bool peer_initiator, u32 timestamp)
{
	bool same_peer = false;
	int ret = 0;

	/* get the existing peer if it's there */
	if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
		struct ieee80211_sta *sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (!IS_ERR_OR_NULL(sta))
			same_peer = ether_addr_equal(peer, sta->addr);
	}

	switch (mvm->tdls_cs.state) {
	case IWL_MVM_TDLS_SW_IDLE:
		/*
		 * might be spurious packet from the peer after the switch is
		 * already done
		 */
		if (type == TDLS_MOVE_CH)
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_REQ_SENT:
		/* only allow requests from the same peer */
		if (!same_peer)
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
			 !peer_initiator)
			/*
			 * We received a ch-switch request while an outgoing
			 * one is pending. Allow it if the peer is the link
			 * initiator.
			 */
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_REQ)
			/* wait for idle before sending another request */
			ret = -EBUSY;
		else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
			/* we got a stale response - ignore it */
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		/*
		 * we are waiting for the FW to give an "active" notification,
		 * so ignore requests in the meantime
		 */
		ret = -EBUSY;
		break;
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		/* as above, allow the link initiator to proceed */
		if (type == TDLS_SEND_CHAN_SW_REQ) {
			if (!same_peer)
				ret = -EBUSY;
			else if (peer_initiator) /* they are the initiator */
				ret = -EBUSY;
		} else if (type == TDLS_MOVE_CH) {
			ret = -EINVAL;
		}
		break;
	case IWL_MVM_TDLS_SW_ACTIVE:
		/*
		 * the only valid request when active is a request to return
		 * to the base channel by the current off-channel peer
		 */
		if (type != TDLS_MOVE_CH || !same_peer)
			ret = -EBUSY;
		break;
	}

	if (ret)
		IWL_DEBUG_TDLS(mvm,
			       "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
			       type, mvm->tdls_cs.state, peer, same_peer,
			       peer_initiator);

	return ret;
}
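
/*
 * Build and send TDLS_CHANNEL_SWITCH_CMD for the given switch type. The
 * channel-switch template frame (with the offset of its switch-timing IE)
 * is embedded in the command so the firmware can transmit it at the right
 * moment. On any failure the state machine is reset to IDLE.
 */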
static int
iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   enum iwl_tdls_channel_switch_type type,
				   const u8 *peer, bool peer_initiator,
				   u8 oper_class,
				   struct cfg80211_chan_def *chandef,
				   u32 timestamp, u16 switch_time,
				   u16 switch_timeout, struct sk_buff *skb,
				   u32 ch_sw_tm_ie)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct iwl_tdls_channel_switch_cmd cmd = {0};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
					timestamp);
	if (ret)
		return ret;

	if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	cmd.switch_type = type;
	cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
	cmd.timing.switch_time = cpu_to_le32(switch_time);
	cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, peer);
	if (!sta) {
		rcu_read_unlock();
		ret = -ENOENT;
		goto out;
	}
	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);

	if (!chandef) {
		if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
		    mvm->tdls_cs.peer.chandef.chan) {
			/* actually moving to the channel */
			chandef = &mvm->tdls_cs.peer.chandef;
		} else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
			   type == TDLS_MOVE_CH) {
			/* we need to return to base channel */
			struct ieee80211_chanctx_conf *chanctx =
					rcu_dereference(vif->chanctx_conf);

			if (WARN_ON_ONCE(!chanctx)) {
				rcu_read_unlock();
				goto out;
			}

			chandef = &chanctx->def;
		}
	}

	if (chandef) {
		cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
			       PHY_BAND_24 : PHY_BAND_5);
		cmd.ci.channel = chandef->chan->hw_value;
		cmd.ci.width = iwl_mvm_get_channel_width(chandef);
		cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
	}

	/* keep quota calculation simple for now - 50% of DTIM for TDLS */
	cmd.timing.max_offchan_duration =
			cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
					     vif->bss_conf.beacon_int) / 2);

	/* Switch time is the first element in the switch-timing IE. */
	cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);

	info = IEEE80211_SKB_CB(skb);
	if (info->control.hw_key)
		iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb);

	iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
			   mvmsta->sta_id);

	hdr = (void *)skb->data;
	iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
				hdr->frame_control);
	rcu_read_unlock();

	memcpy(cmd.frame.data, skb->data, skb->len);

	ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
				   sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
			ret);
		goto out;
	}

	/* channel switch has started, update state */
	if (type != TDLS_MOVE_CH) {
		mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
		iwl_mvm_tdls_update_cs_state(mvm,
					     type == TDLS_SEND_CHAN_SW_REQ ?
					     IWL_MVM_TDLS_SW_REQ_SENT :
					     IWL_MVM_TDLS_SW_REQ_RCVD);
	} else {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
	}

out:
	/* channel switch failed - we are idle */
	if (ret)
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	return ret;
}
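
/*
 * Delayed work that runs after an active switch has finished or timed
 * out: go back to IDLE and, if the switching peer is still around, send
 * a fresh channel-switch request, re-arming the work for roughly one
 * DTIM interval later.
 */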
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
{
	struct iwl_mvm *mvm;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	unsigned int delay;
	int ret;

	mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
	mutex_lock(&mvm->mutex);

	/* called after an active channel switch has finished or timed-out */
	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	/* station might be gone, in that case do nothing */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT)
		goto out;

	sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;
	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr,
						 mvm->tdls_cs.peer.initiator,
						 mvm->tdls_cs.peer.op_class,
						 &mvm->tdls_cs.peer.chandef,
						 0, 0, 0,
						 mvm->tdls_cs.peer.skb,
						 mvm->tdls_cs.peer.ch_sw_tm_ie);
	if (ret)
		IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);

	/* retry after a DTIM if we failed sending now */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	queue_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			   msecs_to_jiffies(delay));
out:
	mutex_unlock(&mvm->mutex);
}
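
/*
 * Entry point called by mac80211 to start a TDLS channel switch with a
 * peer. Only one switching peer is supported at a time; a copy of the
 * template frame and the switch parameters is kept so the periodic work
 * can repeat the switch until it is canceled.
 */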
int
iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u8 oper_class,
			    struct cfg80211_chan_def *chandef,
			    struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta;
	unsigned int delay;
	int ret;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
		       sta->addr, chandef->chan->center_freq, chandef->width);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) {
		IWL_DEBUG_TDLS(mvm,
			       "Existing peer. Can't start switch with %pM\n",
			       sta->addr);
		ret = -EBUSY;
		goto out;
	}

	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr, sta->tdls_initiator,
						 oper_class, chandef, 0, 0, 0,
						 tmpl_skb, ch_sw_tm_ie);
	if (ret)
		goto out;

	/*
	 * Mark the peer as "in tdls switch" for this vif. We only allow a
	 * single such peer per vif.
	 */
	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
	if (!mvm->tdls_cs.peer.skb) {
		ret = -ENOMEM;
		goto out;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
	mvm->tdls_cs.peer.chandef = *chandef;
	mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
	mvm->tdls_cs.peer.op_class = oper_class;
	mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;

	/*
	 * Wait for 2 DTIM periods before attempting the next switch. The next
	 * switch will be made sooner if the current one completes before that.
	 */
	delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
			     vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

out:
	mutex_unlock(&mvm->mutex);
	return ret;
}
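
/*
 * Entry point called by mac80211 to cancel an ongoing TDLS channel switch
 * with the given peer. If a switch is in progress for that peer, wait a
 * DTIM interval so the PHY is back on the base channel before flushing
 * the delayed work.
 */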
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *cur_sta;
	bool wait_for_phy = false;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) {
		IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
		goto out;
	}

	cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* make sure it's the same peer */
	if (cur_sta != sta)
		goto out;

	/*
	 * If we're currently in a switch because of the now canceled peer,
	 * wait a DTIM here to make sure the phy is back on the base channel.
	 * We can't otherwise force it.
	 */
	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
	    mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
		wait_for_phy = true;

	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
	dev_kfree_skb(mvm->tdls_cs.peer.skb);
	mvm->tdls_cs.peer.skb = NULL;

out:
	mutex_unlock(&mvm->mutex);

	/* make sure the phy is on the base channel */
	if (wait_for_phy)
		msleep(TU_TO_MS(vif->bss_conf.dtim_period *
				vif->bss_conf.beacon_int));

	/* flush the channel switch state */
	flush_delayed_work(&mvm->tdls_cs.dwork);

	IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
}
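
/*
 * Entry point called by mac80211 when the peer sends us a channel-switch
 * request or response frame. Map it to the matching firmware switch type
 * and arm a timeout in case the switch never completes.
 */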
void
iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_tdls_ch_sw_params *params)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	enum iwl_tdls_channel_switch_type type;
	unsigned int delay;
	const char *action_str =
		params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
		"REQ" : "RESP";

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm,
		       "Received TDLS ch switch action %s from %pM status %d\n",
		       action_str, params->sta->addr, params->status);

	/*
	 * we got a non-zero status from a peer we were switching to - move to
	 * the idle state and retry again later
	 */
	if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
	    params->status != 0 &&
	    mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
		struct ieee80211_sta *cur_sta;

		/* make sure it's the same peer */
		cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (cur_sta == params->sta) {
			iwl_mvm_tdls_update_cs_state(mvm,
						     IWL_MVM_TDLS_SW_IDLE);
			goto retry;
		}
	}

	type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
	       TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;

	iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
					   params->sta->tdls_initiator, 0,
					   params->chandef, params->timestamp,
					   params->switch_time,
					   params->switch_timeout,
					   params->tmpl_skb,
					   params->ch_sw_tm_ie);

retry:
	/* register a timeout in case we don't succeed in switching */
	delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
		1024 / 1000;
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));
	mutex_unlock(&mvm->mutex);
}