scan.c

  1. /******************************************************************************
  2. *
  3. * This file is provided under a dual BSD/GPLv2 license. When using or
  4. * redistributing this file, you may do so under either license.
  5. *
  6. * GPL LICENSE SUMMARY
  7. *
  8. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  9. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of version 2 of the GNU General Public License as
  13. * published by the Free Software Foundation.
  14. *
  15. * This program is distributed in the hope that it will be useful, but
  16. * WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
  23. * USA
  24. *
  25. * The full GNU General Public License is included in this distribution
  26. * in the file called COPYING.
  27. *
  28. * Contact Information:
  29. * Intel Linux Wireless <ilw@linux.intel.com>
  30. * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  31. *
  32. * BSD LICENSE
  33. *
  34. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  35. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  36. * All rights reserved.
  37. *
  38. * Redistribution and use in source and binary forms, with or without
  39. * modification, are permitted provided that the following conditions
  40. * are met:
  41. *
  42. * * Redistributions of source code must retain the above copyright
  43. * notice, this list of conditions and the following disclaimer.
  44. * * Redistributions in binary form must reproduce the above copyright
  45. * notice, this list of conditions and the following disclaimer in
  46. * the documentation and/or other materials provided with the
  47. * distribution.
  48. * * Neither the name Intel Corporation nor the names of its
  49. * contributors may be used to endorse or promote products derived
  50. * from this software without specific prior written permission.
  51. *
  52. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  53. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  54. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  55. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  56. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  57. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  58. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  59. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  60. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  61. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  62. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  63. *
  64. *****************************************************************************/
  65. #include <linux/etherdevice.h>
  66. #include <net/mac80211.h>
  67. #include "mvm.h"
  68. #include "iwl-eeprom-parse.h"
  69. #include "fw-api-scan.h"
  70. #define IWL_PLCP_QUIET_THRESH 1 /* packets */
  71. #define IWL_ACTIVE_QUIET_TIME 10 /* msec */
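/*
 * Scan parameters computed per request by iwl_mvm_scan_calc_params():
 * max_out_time/suspend_time bound how long we stay away from (and how
 * long we return to) the operating channel while a vif is bound, and
 * dwell[] holds the active/passive dwell time used for each band.
 */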
  72. struct iwl_mvm_scan_params {
  73. u32 max_out_time;
  74. u32 suspend_time;
  75. bool passive_fragmented;
  76. struct _dwell {
  77. u16 passive;
  78. u16 active;
  79. } dwell[IEEE80211_NUM_BANDS];
  80. };
  81. enum iwl_umac_scan_uid_type {
  82. IWL_UMAC_SCAN_UID_REG_SCAN = BIT(0),
  83. IWL_UMAC_SCAN_UID_SCHED_SCAN = BIT(1),
  84. IWL_UMAC_SCAN_UID_ALL = IWL_UMAC_SCAN_UID_REG_SCAN |
  85. IWL_UMAC_SCAN_UID_SCHED_SCAN,
  86. };
  87. static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
  88. enum iwl_umac_scan_uid_type type, bool notify);
  89. static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
  90. {
  91. if (mvm->scan_rx_ant != ANT_NONE)
  92. return mvm->scan_rx_ant;
  93. return mvm->fw->valid_rx_ant;
  94. }
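/*
 * Build the rx_chain selection for scanning: mark the scan RX antennas
 * as valid, force them for both the MIMO and non-MIMO selections, and
 * set the driver-force bit.
 */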
  95. static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
  96. {
  97. u16 rx_chain;
  98. u8 rx_ant;
  99. rx_ant = iwl_mvm_scan_rx_ant(mvm);
  100. rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
  101. rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
  102. rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
  103. rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
  104. return cpu_to_le16(rx_chain);
  105. }
  106. static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band)
  107. {
  108. if (band == IEEE80211_BAND_2GHZ)
  109. return cpu_to_le32(PHY_BAND_24);
  110. else
  111. return cpu_to_le32(PHY_BAND_5);
  112. }
  113. static inline __le32
  114. iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
  115. bool no_cck)
  116. {
  117. u32 tx_ant;
  118. mvm->scan_last_antenna_idx =
  119. iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
  120. mvm->scan_last_antenna_idx);
  121. tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
  122. if (band == IEEE80211_BAND_2GHZ && !no_cck)
  123. return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
  124. tx_ant);
  125. else
  126. return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
  127. }
  128. /*
  129. * We insert the SSIDs in an inverted order, because the FW will
  130. * invert it back. The most prioritized SSID, which is first in the
  131. * request list, is not copied here, but inserted directly to the probe
  132. * request.
  133. */
  134. static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
  135. struct cfg80211_ssid *ssids,
  136. int n_ssids, int first)
  137. {
  138. int fw_idx, req_idx;
  139. for (req_idx = n_ssids - 1, fw_idx = 0; req_idx >= first;
  140. req_idx--, fw_idx++) {
  141. cmd_ssid[fw_idx].id = WLAN_EID_SSID;
  142. cmd_ssid[fw_idx].len = ssids[req_idx].ssid_len;
  143. memcpy(cmd_ssid[fw_idx].ssid,
  144. ssids[req_idx].ssid,
  145. ssids[req_idx].ssid_len);
  146. }
  147. }
  148. /*
  149. * If req->n_ssids > 0, it means we should do an active scan.
  150. * In case of active scan w/o directed scan, we receive a zero-length SSID
  151. * just to notify that this scan is active and not passive.
  152. * In order to notify the FW of the number of SSIDs we wish to scan (including
  153. * the zero-length one), we need to set the corresponding bits in chan->type,
  155. * one for each SSID, and set the active bit (first). The first SSID is
  155. * already included in the probe template, so we need to set only
  156. * req->n_ssids - 1 bits in addition to the first bit.
  157. */
  158. static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
  159. enum ieee80211_band band, int n_ssids)
  160. {
  161. if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
  162. return 10;
  163. if (band == IEEE80211_BAND_2GHZ)
  164. return 20 + 3 * (n_ssids + 1);
  165. return 10 + 2 * (n_ssids + 1);
  166. }
  167. static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
  168. enum ieee80211_band band)
  169. {
  170. if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
  171. return 110;
  172. return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
  173. }
  174. static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
  175. struct cfg80211_scan_request *req,
  176. bool basic_ssid,
  177. struct iwl_mvm_scan_params *params)
  178. {
  179. struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
  180. (cmd->data + le16_to_cpu(cmd->tx_cmd.len));
  181. int i;
  182. int type = BIT(req->n_ssids) - 1;
  183. enum ieee80211_band band = req->channels[0]->band;
  184. if (!basic_ssid)
  185. type |= BIT(req->n_ssids);
  186. for (i = 0; i < cmd->channel_count; i++) {
  187. chan->channel = cpu_to_le16(req->channels[i]->hw_value);
  188. chan->type = cpu_to_le32(type);
  189. if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
  190. chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
  191. chan->active_dwell = cpu_to_le16(params->dwell[band].active);
  192. chan->passive_dwell = cpu_to_le16(params->dwell[band].passive);
  193. chan->iteration_count = cpu_to_le16(1);
  194. chan++;
  195. }
  196. }
  197. /*
  198. * Fill in probe request with the following parameters:
  199. * TA is our vif HW address, which mac80211 ensures we have.
  200. * The packet is broadcast: DA and BSSID are the broadcast address, SA is the TA.
  201. * The probe request IE is made out of two: first comes the most prioritized
  202. * SSID if a directed scan is requested. Second comes whatever extra
  203. * information was given to us as the scan request IE.
  204. */
  205. static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
  206. int n_ssids, const u8 *ssid, int ssid_len,
  207. const u8 *band_ie, int band_ie_len,
  208. const u8 *common_ie, int common_ie_len,
  209. int left)
  210. {
  211. int len = 0;
  212. u8 *pos = NULL;
  213. /* Make sure there is enough space for the probe request,
  214. * two mandatory IEs and the data */
  215. left -= 24;
  216. if (left < 0)
  217. return 0;
  218. frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
  219. eth_broadcast_addr(frame->da);
  220. memcpy(frame->sa, ta, ETH_ALEN);
  221. eth_broadcast_addr(frame->bssid);
  222. frame->seq_ctrl = 0;
  223. len += 24;
  224. /* for passive scans, no need to fill anything */
  225. if (n_ssids == 0)
  226. return (u16)len;
  227. /* points to the payload of the request */
  228. pos = &frame->u.probe_req.variable[0];
  229. /* fill in our SSID IE */
  230. left -= ssid_len + 2;
  231. if (left < 0)
  232. return 0;
  233. *pos++ = WLAN_EID_SSID;
  234. *pos++ = ssid_len;
  235. if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
  236. memcpy(pos, ssid, ssid_len);
  237. pos += ssid_len;
  238. }
  239. len += ssid_len + 2;
  240. if (WARN_ON(left < band_ie_len + common_ie_len))
  241. return len;
  242. if (band_ie && band_ie_len) {
  243. memcpy(pos, band_ie, band_ie_len);
  244. pos += band_ie_len;
  245. len += band_ie_len;
  246. }
  247. if (common_ie && common_ie_len) {
  248. memcpy(pos, common_ie, common_ie_len);
  249. pos += common_ie_len;
  250. len += common_ie_len;
  251. }
  252. return (u16)len;
  253. }
  254. static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
  255. struct ieee80211_vif *vif)
  256. {
  257. struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
  258. bool *global_bound = data;
  259. if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
  260. mvmvif->phy_ctxt->id < MAX_PHYS)
  261. *global_bound = true;
  262. }
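/*
 * Compute dwell times and out-of-channel limits for a scan. If no vif
 * is bound to a channel, max_out_time/suspend_time stay 0; otherwise
 * they are tightened, and under low latency a fragmented scan is used
 * when the firmware supports it. As noted below, P2P device scans are
 * never fragmented so device discovery is not slowed down.
 */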
  263. static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
  264. struct ieee80211_vif *vif,
  265. int n_ssids, u32 flags,
  266. struct iwl_mvm_scan_params *params)
  267. {
  268. bool global_bound = false;
  269. enum ieee80211_band band;
  270. u8 frag_passive_dwell = 0;
  271. ieee80211_iterate_active_interfaces_atomic(mvm->hw,
  272. IEEE80211_IFACE_ITER_NORMAL,
  273. iwl_mvm_scan_condition_iterator,
  274. &global_bound);
  275. if (!global_bound)
  276. goto not_bound;
  277. params->suspend_time = 30;
  278. params->max_out_time = 170;
  279. if (iwl_mvm_low_latency(mvm)) {
  280. if (mvm->fw->ucode_capa.api[0] &
  281. IWL_UCODE_TLV_API_FRAGMENTED_SCAN) {
  282. params->suspend_time = 105;
  283. params->max_out_time = 70;
  284. frag_passive_dwell = 20;
  285. } else {
  286. params->suspend_time = 120;
  287. params->max_out_time = 120;
  288. }
  289. }
  290. if (frag_passive_dwell && (mvm->fw->ucode_capa.api[0] &
  291. IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
  292. /*
  293. * P2P device scan should not be fragmented to avoid negative
  294. * impact on P2P device discovery. Configure max_out_time to be
  295. * equal to the dwell time on a passive channel. Take the longest
  296. * possible value, the one that corresponds to the 2GHz band.
  297. */
  298. if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
  299. u32 passive_dwell =
  300. iwl_mvm_get_passive_dwell(mvm,
  301. IEEE80211_BAND_2GHZ);
  302. params->max_out_time = passive_dwell;
  303. } else {
  304. params->passive_fragmented = true;
  305. }
  306. }
  307. if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
  308. params->max_out_time = 200;
  309. not_bound:
  310. for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
  311. if (params->passive_fragmented)
  312. params->dwell[band].passive = frag_passive_dwell;
  313. else
  314. params->dwell[band].passive =
  315. iwl_mvm_get_passive_dwell(mvm, band);
  316. params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
  317. n_ssids);
  318. }
  319. }
  320. static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
  321. {
  322. /* require rrm scan whenever the fw supports it */
  323. return mvm->fw->ucode_capa.capa[0] &
  324. IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT;
  325. }
  326. static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
  327. bool is_sched_scan)
  328. {
  329. int max_probe_len;
  330. if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
  331. max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
  332. else
  333. max_probe_len = mvm->fw->ucode_capa.max_probe_length;
  334. /* we create the 802.11 header and SSID element */
  335. max_probe_len -= 24 + 2;
  336. /* basic ssid is added only for hw_scan with an old api */
  337. if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID) &&
  338. !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) &&
  339. !is_sched_scan)
  340. max_probe_len -= 32;
  341. /* DS parameter set element is added on 2.4GHZ band if required */
  342. if (iwl_mvm_rrm_scan_needed(mvm))
  343. max_probe_len -= 3;
  344. return max_probe_len;
  345. }
  346. int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
  347. {
  348. int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm, is_sched_scan);
  349. if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN))
  350. return max_ie_len;
  351. /* TODO: [BUG] This function should return the maximum allowed size of
  352. * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
  353. * in the same command. So the correct implementation of this function
  354. * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
  355. * command has only 512 bytes and it would leave us with about 240
  356. * bytes for scan IEs, which is clearly not enough. So meanwhile
  357. * we will report an incorrect value. This may result in a failure to
  358. * issue a scan in unified_scan_lmac and unified_sched_scan_lmac
  359. * functions with -ENOBUFS, if a large enough probe will be provided.
  360. */
  361. return max_ie_len;
  362. }
  363. int iwl_mvm_scan_request(struct iwl_mvm *mvm,
  364. struct ieee80211_vif *vif,
  365. struct cfg80211_scan_request *req)
  366. {
  367. struct iwl_host_cmd hcmd = {
  368. .id = SCAN_REQUEST_CMD,
  369. .len = { 0, },
  370. .data = { mvm->scan_cmd, },
  371. .dataflags = { IWL_HCMD_DFL_NOCOPY, },
  372. };
  373. struct iwl_scan_cmd *cmd = mvm->scan_cmd;
  374. int ret;
  375. u32 status;
  376. int ssid_len = 0;
  377. u8 *ssid = NULL;
  378. bool basic_ssid = !(mvm->fw->ucode_capa.flags &
  379. IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
  380. struct iwl_mvm_scan_params params = {};
  381. lockdep_assert_held(&mvm->mutex);
  382. /* we should have failed registration if scan_cmd was NULL */
  383. if (WARN_ON(mvm->scan_cmd == NULL))
  384. return -ENOMEM;
  385. IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
  386. mvm->scan_status = IWL_MVM_SCAN_OS;
  387. memset(cmd, 0, ksize(cmd));
  388. cmd->channel_count = (u8)req->n_channels;
  389. cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
  390. cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
  391. cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
  392. iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags, &params);
  393. cmd->max_out_time = cpu_to_le32(params.max_out_time);
  394. cmd->suspend_time = cpu_to_le32(params.suspend_time);
  395. if (params.passive_fragmented)
  396. cmd->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
  397. cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
  398. cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
  399. MAC_FILTER_IN_BEACON);
  400. if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
  401. cmd->type = cpu_to_le32(SCAN_TYPE_DISCOVERY_FORCED);
  402. else
  403. cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
  404. cmd->repeats = cpu_to_le32(1);
  405. /*
  406. * If the user asked for passive scan, don't change to active scan if
  407. * you see any activity on the channel - remain passive.
  408. */
  409. if (req->n_ssids > 0) {
  410. cmd->passive2active = cpu_to_le16(1);
  411. cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
  412. if (basic_ssid) {
  413. ssid = req->ssids[0].ssid;
  414. ssid_len = req->ssids[0].ssid_len;
  415. }
  416. } else {
  417. cmd->passive2active = 0;
  418. cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
  419. }
  420. iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->ssids, req->n_ssids,
  421. basic_ssid ? 1 : 0);
  422. cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
  423. 3 << TX_CMD_FLG_BT_PRIO_POS);
  424. cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
  425. cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
  426. cmd->tx_cmd.rate_n_flags =
  427. iwl_mvm_scan_rate_n_flags(mvm, req->channels[0]->band,
  428. req->no_cck);
  429. cmd->tx_cmd.len =
  430. cpu_to_le16(iwl_mvm_fill_probe_req(
  431. (struct ieee80211_mgmt *)cmd->data,
  432. vif->addr,
  433. req->n_ssids, ssid, ssid_len,
  434. req->ie, req->ie_len, NULL, 0,
  435. mvm->fw->ucode_capa.max_probe_length));
  436. iwl_mvm_scan_fill_channels(cmd, req, basic_ssid, &params);
  437. cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
  438. le16_to_cpu(cmd->tx_cmd.len) +
  439. (cmd->channel_count * sizeof(struct iwl_scan_channel)));
  440. hcmd.len[0] = le16_to_cpu(cmd->len);
  441. status = SCAN_RESPONSE_OK;
  442. ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &status);
  443. if (!ret && status == SCAN_RESPONSE_OK) {
  444. IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
  445. } else {
  446. /*
  447. * If the scan failed, it usually means that the FW was unable
  448. * to allocate the time events. Warn on it, but maybe we
  449. * should try to send the command again with different params.
  450. */
  451. IWL_ERR(mvm, "Scan failed! status 0x%x ret %d\n",
  452. status, ret);
  453. mvm->scan_status = IWL_MVM_SCAN_NONE;
  454. ret = -EIO;
  455. }
  456. return ret;
  457. }
  458. int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
  459. struct iwl_device_cmd *cmd)
  460. {
  461. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  462. struct iwl_cmd_response *resp = (void *)pkt->data;
  463. IWL_DEBUG_SCAN(mvm, "Scan response received. status 0x%x\n",
  464. le32_to_cpu(resp->status));
  465. return 0;
  466. }
  467. int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
  468. struct iwl_device_cmd *cmd)
  469. {
  470. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  471. struct iwl_scan_complete_notif *notif = (void *)pkt->data;
  472. lockdep_assert_held(&mvm->mutex);
  473. IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n",
  474. notif->status, notif->scanned_channels);
  475. if (mvm->scan_status == IWL_MVM_SCAN_OS)
  476. mvm->scan_status = IWL_MVM_SCAN_NONE;
  477. ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);
  478. iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
  479. return 0;
  480. }
  481. int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
  482. struct iwl_rx_cmd_buffer *rxb,
  483. struct iwl_device_cmd *cmd)
  484. {
  485. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  486. if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) &&
  487. !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
  488. struct iwl_sched_scan_results *notif = (void *)pkt->data;
  489. if (!(notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN))
  490. return 0;
  491. }
  492. IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
  493. ieee80211_sched_scan_results(mvm->hw);
  494. return 0;
  495. }
  496. static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
  497. struct iwl_rx_packet *pkt, void *data)
  498. {
  499. struct iwl_mvm *mvm =
  500. container_of(notif_wait, struct iwl_mvm, notif_wait);
  501. struct iwl_scan_complete_notif *notif;
  502. u32 *resp;
  503. switch (pkt->hdr.cmd) {
  504. case SCAN_ABORT_CMD:
  505. resp = (void *)pkt->data;
  506. if (*resp == CAN_ABORT_STATUS) {
  507. IWL_DEBUG_SCAN(mvm,
  508. "Scan can be aborted, wait until completion\n");
  509. return false;
  510. }
  511. /*
  512. * If scan cannot be aborted, it means that we had a
  513. * SCAN_COMPLETE_NOTIFICATION in the pipe and it called
  514. * ieee80211_scan_completed already.
  515. */
  516. IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
  517. *resp);
  518. return true;
  519. case SCAN_COMPLETE_NOTIFICATION:
  520. notif = (void *)pkt->data;
  521. IWL_DEBUG_SCAN(mvm, "Scan aborted: status 0x%x\n",
  522. notif->status);
  523. return true;
  524. default:
  525. WARN_ON(1);
  526. return false;
  527. }
  528. }
  529. static int iwl_mvm_cancel_regular_scan(struct iwl_mvm *mvm)
  530. {
  531. struct iwl_notification_wait wait_scan_abort;
  532. static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
  533. SCAN_COMPLETE_NOTIFICATION };
  534. int ret;
  535. iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
  536. scan_abort_notif,
  537. ARRAY_SIZE(scan_abort_notif),
  538. iwl_mvm_scan_abort_notif, NULL);
  539. ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, 0, 0, NULL);
  540. if (ret) {
  541. IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
  542. /* mac80211's state will be cleaned in the nic_restart flow */
  543. goto out_remove_notif;
  544. }
  545. return iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, HZ);
  546. out_remove_notif:
  547. iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
  548. return ret;
  549. }
  550. int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
  551. struct iwl_rx_cmd_buffer *rxb,
  552. struct iwl_device_cmd *cmd)
  553. {
  554. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  555. u8 status, ebs_status;
  556. if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) {
  557. struct iwl_periodic_scan_complete *scan_notif;
  558. scan_notif = (void *)pkt->data;
  559. status = scan_notif->status;
  560. ebs_status = scan_notif->ebs_status;
  561. } else {
  562. struct iwl_scan_offload_complete *scan_notif;
  563. scan_notif = (void *)pkt->data;
  564. status = scan_notif->status;
  565. ebs_status = scan_notif->ebs_status;
  566. }
  567. /* scan status must be locked for proper checking */
  568. lockdep_assert_held(&mvm->mutex);
  569. IWL_DEBUG_SCAN(mvm,
  570. "%s completed, status %s, EBS status %s\n",
  571. mvm->scan_status == IWL_MVM_SCAN_SCHED ?
  572. "Scheduled scan" : "Scan",
  573. status == IWL_SCAN_OFFLOAD_COMPLETED ?
  574. "completed" : "aborted",
  575. ebs_status == IWL_SCAN_EBS_SUCCESS ?
  576. "success" : "failed");
  577. /* only call mac80211 completion if the stop was initiated by FW */
  578. if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
  579. mvm->scan_status = IWL_MVM_SCAN_NONE;
  580. ieee80211_sched_scan_stopped(mvm->hw);
  581. } else if (mvm->scan_status == IWL_MVM_SCAN_OS) {
  582. mvm->scan_status = IWL_MVM_SCAN_NONE;
  583. ieee80211_scan_completed(mvm->hw,
  584. status == IWL_SCAN_OFFLOAD_ABORTED);
  585. iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
  586. }
  587. mvm->last_ebs_successful = !ebs_status;
  588. return 0;
  589. }
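/*
 * Build the TX command for the probe requests of an offloaded scan on
 * the given band: aux station, infinite lifetime, a fixed rate from
 * iwl_mvm_scan_rate_n_flags(), and a probe request assembled from the
 * per-band and common IEs.
 */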
  590. static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
  591. struct ieee80211_vif *vif,
  592. struct ieee80211_scan_ies *ies,
  593. enum ieee80211_band band,
  594. struct iwl_tx_cmd *cmd,
  595. u8 *data)
  596. {
  597. u16 cmd_len;
  598. cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
  599. cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
  600. cmd->sta_id = mvm->aux_sta.sta_id;
  601. cmd->rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, band, false);
  602. cmd_len = iwl_mvm_fill_probe_req((struct ieee80211_mgmt *)data,
  603. vif->addr,
  604. 1, NULL, 0,
  605. ies->ies[band], ies->len[band],
  606. ies->common_ies, ies->common_ie_len,
  607. SCAN_OFFLOAD_PROBE_REQ_SIZE);
  608. cmd->len = cpu_to_le16(cmd_len);
  609. }
  610. static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
  611. struct ieee80211_vif *vif,
  612. struct cfg80211_sched_scan_request *req,
  613. struct iwl_scan_offload_cmd *scan,
  614. struct iwl_mvm_scan_params *params)
  615. {
  616. scan->channel_count = req->n_channels;
  617. scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
  618. scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
  619. scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
  620. scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
  621. scan->max_out_time = cpu_to_le32(params->max_out_time);
  622. scan->suspend_time = cpu_to_le32(params->suspend_time);
  623. scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
  624. MAC_FILTER_IN_BEACON);
  625. scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
  626. scan->rep_count = cpu_to_le32(1);
  627. if (params->passive_fragmented)
  628. scan->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
  629. }
  630. static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
  631. {
  632. int i;
  633. for (i = 0; i < PROBE_OPTION_MAX; i++) {
  634. if (!ssid_list[i].len)
  635. break;
  636. if (ssid_list[i].len == ssid_len &&
  637. !memcmp(ssid_list[i].ssid, ssid, ssid_len))
  638. return i;
  639. }
  640. return -1;
  641. }
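/*
 * Fill direct_scan[] for a scheduled scan: SSIDs from the match sets
 * come first, in the same order iwl_mvm_config_sched_scan_profiles()
 * uses, followed by any remaining SSIDs from the request. ssid_bitmap
 * gets BIT(i + 1) set for the SSID stored at index i.
 */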
  642. static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
  643. struct iwl_ssid_ie *direct_scan,
  644. u32 *ssid_bitmap, bool basic_ssid)
  645. {
  646. int i, j;
  647. int index;
  648. /*
  649. * copy SSIDs from match list.
  650. * iwl_config_sched_scan_profiles() uses the order of these ssids to
  651. * config match list.
  652. */
  653. for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
  654. /* skip empty SSID matchsets */
  655. if (!req->match_sets[i].ssid.ssid_len)
  656. continue;
  657. direct_scan[i].id = WLAN_EID_SSID;
  658. direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
  659. memcpy(direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
  660. direct_scan[i].len);
  661. }
  662. /* add SSIDs from scan SSID list */
  663. *ssid_bitmap = 0;
  664. for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) {
  665. index = iwl_ssid_exist(req->ssids[j].ssid,
  666. req->ssids[j].ssid_len,
  667. direct_scan);
  668. if (index < 0) {
  669. if (!req->ssids[j].ssid_len && basic_ssid)
  670. continue;
  671. direct_scan[i].id = WLAN_EID_SSID;
  672. direct_scan[i].len = req->ssids[j].ssid_len;
  673. memcpy(direct_scan[i].ssid, req->ssids[j].ssid,
  674. direct_scan[i].len);
  675. *ssid_bitmap |= BIT(i + 1);
  676. i++;
  677. } else {
  678. *ssid_bitmap |= BIT(index + 1);
  679. }
  680. }
  681. }
  682. static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
  683. struct cfg80211_sched_scan_request *req,
  684. u8 *channels_buffer,
  685. enum ieee80211_band band,
  686. int *head,
  687. u32 ssid_bitmap,
  688. struct iwl_mvm_scan_params *params)
  689. {
  690. u32 n_channels = mvm->fw->ucode_capa.n_scan_channels;
  691. __le32 *type = (__le32 *)channels_buffer;
  692. __le16 *channel_number = (__le16 *)(type + n_channels);
  693. __le16 *iter_count = channel_number + n_channels;
  694. __le32 *iter_interval = (__le32 *)(iter_count + n_channels);
  695. u8 *active_dwell = (u8 *)(iter_interval + n_channels);
  696. u8 *passive_dwell = active_dwell + n_channels;
  697. int i, index = 0;
  698. for (i = 0; i < req->n_channels; i++) {
  699. struct ieee80211_channel *chan = req->channels[i];
  700. if (chan->band != band)
  701. continue;
  702. index = *head;
  703. (*head)++;
  704. channel_number[index] = cpu_to_le16(chan->hw_value);
  705. active_dwell[index] = params->dwell[band].active;
  706. passive_dwell[index] = params->dwell[band].passive;
  707. iter_count[index] = cpu_to_le16(1);
  708. iter_interval[index] = 0;
  709. if (!(chan->flags & IEEE80211_CHAN_NO_IR))
  710. type[index] |=
  711. cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
  712. type[index] |= cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
  713. IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
  714. if (chan->flags & IEEE80211_CHAN_NO_HT40)
  715. type[index] |=
  716. cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
  717. /* scan for all SSIDs from req->ssids */
  718. type[index] |= cpu_to_le32(ssid_bitmap);
  719. }
  720. }
  721. int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
  722. struct ieee80211_vif *vif,
  723. struct cfg80211_sched_scan_request *req,
  724. struct ieee80211_scan_ies *ies)
  725. {
  726. int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
  727. int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
  728. int head = 0;
  729. u32 ssid_bitmap;
  730. int cmd_len;
  731. int ret;
  732. u8 *probes;
  733. bool basic_ssid = !(mvm->fw->ucode_capa.flags &
  734. IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
  735. struct iwl_scan_offload_cfg *scan_cfg;
  736. struct iwl_host_cmd cmd = {
  737. .id = SCAN_OFFLOAD_CONFIG_CMD,
  738. };
  739. struct iwl_mvm_scan_params params = {};
  740. lockdep_assert_held(&mvm->mutex);
  741. cmd_len = sizeof(struct iwl_scan_offload_cfg) +
  742. mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE +
  743. 2 * SCAN_OFFLOAD_PROBE_REQ_SIZE;
  744. scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
  745. if (!scan_cfg)
  746. return -ENOMEM;
  747. probes = scan_cfg->data +
  748. mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE;
  749. iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);
  750. iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd, &params);
  751. scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
  752. iwl_scan_offload_build_ssid(req, scan_cfg->scan_cmd.direct_scan,
  753. &ssid_bitmap, basic_ssid);
  754. /* build tx frames for supported bands */
  755. if (band_2ghz) {
  756. iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
  757. IEEE80211_BAND_2GHZ,
  758. &scan_cfg->scan_cmd.tx_cmd[0],
  759. probes);
  760. iwl_build_channel_cfg(mvm, req, scan_cfg->data,
  761. IEEE80211_BAND_2GHZ, &head,
  762. ssid_bitmap, &params);
  763. }
  764. if (band_5ghz) {
  765. iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
  766. IEEE80211_BAND_5GHZ,
  767. &scan_cfg->scan_cmd.tx_cmd[1],
  768. probes +
  769. SCAN_OFFLOAD_PROBE_REQ_SIZE);
  770. iwl_build_channel_cfg(mvm, req, scan_cfg->data,
  771. IEEE80211_BAND_5GHZ, &head,
  772. ssid_bitmap, &params);
  773. }
  774. cmd.data[0] = scan_cfg;
  775. cmd.len[0] = cmd_len;
  776. cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
  777. IWL_DEBUG_SCAN(mvm, "Sending scheduled scan config\n");
  778. ret = iwl_mvm_send_cmd(mvm, &cmd);
  779. kfree(scan_cfg);
  780. return ret;
  781. }
  782. int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
  783. struct cfg80211_sched_scan_request *req)
  784. {
  785. struct iwl_scan_offload_profile *profile;
  786. struct iwl_scan_offload_profile_cfg *profile_cfg;
  787. struct iwl_scan_offload_blacklist *blacklist;
  788. struct iwl_host_cmd cmd = {
  789. .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
  790. .len[1] = sizeof(*profile_cfg),
  791. .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
  792. .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
  793. };
  794. int blacklist_len;
  795. int i;
  796. int ret;
  797. if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
  798. return -EIO;
  799. if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
  800. blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
  801. else
  802. blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
  803. blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
  804. if (!blacklist)
  805. return -ENOMEM;
  806. profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
  807. if (!profile_cfg) {
  808. ret = -ENOMEM;
  809. goto free_blacklist;
  810. }
  811. cmd.data[0] = blacklist;
  812. cmd.len[0] = sizeof(*blacklist) * blacklist_len;
  813. cmd.data[1] = profile_cfg;
  814. /* No blacklist configuration */
  815. profile_cfg->num_profiles = req->n_match_sets;
  816. profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
  817. profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
  818. profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
  819. if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
  820. profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
  821. for (i = 0; i < req->n_match_sets; i++) {
  822. profile = &profile_cfg->profiles[i];
  823. profile->ssid_index = i;
  824. /* Support any cipher and auth algorithm */
  825. profile->unicast_cipher = 0xff;
  826. profile->auth_alg = 0xff;
  827. profile->network_type = IWL_NETWORK_TYPE_ANY;
  828. profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
  829. profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
  830. }
  831. IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
  832. ret = iwl_mvm_send_cmd(mvm, &cmd);
  833. kfree(profile_cfg);
  834. free_blacklist:
  835. kfree(blacklist);
  836. return ret;
  837. }
  838. static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
  839. struct cfg80211_sched_scan_request *req)
  840. {
  841. if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
  842. IWL_DEBUG_SCAN(mvm,
  843. "Sending scheduled scan with filtering, n_match_sets %d\n",
  844. req->n_match_sets);
  845. return false;
  846. }
  847. IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
  848. return true;
  849. }
  850. int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
  851. struct cfg80211_sched_scan_request *req)
  852. {
  853. struct iwl_scan_offload_req scan_req = {
  854. .watchdog = IWL_SCHED_SCAN_WATCHDOG,
  855. .schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS,
  856. .schedule_line[0].delay = cpu_to_le16(req->interval / 1000),
  857. .schedule_line[0].full_scan_mul = 1,
  858. .schedule_line[1].iterations = 0xff,
  859. .schedule_line[1].delay = cpu_to_le16(req->interval / 1000),
  860. .schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
  861. };
  862. if (iwl_mvm_scan_pass_all(mvm, req))
  863. scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
  864. if (mvm->last_ebs_successful &&
  865. mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
  866. scan_req.flags |=
  867. cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
  868. return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, 0,
  869. sizeof(scan_req), &scan_req);
  870. }
  871. int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
  872. struct ieee80211_vif *vif,
  873. struct cfg80211_sched_scan_request *req,
  874. struct ieee80211_scan_ies *ies)
  875. {
  876. int ret;
  877. if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
  878. ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
  879. if (ret)
  880. return ret;
  881. ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies);
  882. } else if ((mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
  883. mvm->scan_status = IWL_MVM_SCAN_SCHED;
  884. ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
  885. if (ret)
  886. return ret;
  887. ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
  888. } else {
  889. mvm->scan_status = IWL_MVM_SCAN_SCHED;
  890. ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
  891. if (ret)
  892. return ret;
  893. ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
  894. if (ret)
  895. return ret;
  896. ret = iwl_mvm_sched_scan_start(mvm, req);
  897. }
  898. return ret;
  899. }
  900. static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
  901. {
  902. int ret;
  903. struct iwl_host_cmd cmd = {
  904. .id = SCAN_OFFLOAD_ABORT_CMD,
  905. };
  906. u32 status;
  907. /* Exit instantly with error when device is not ready
  908. * to receive scan abort command or it does not perform
  909. * scheduled scan currently */
  910. if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
  911. (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
  912. mvm->scan_status != IWL_MVM_SCAN_OS))
  913. return -EIO;
  914. ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
  915. if (ret)
  916. return ret;
  917. if (status != CAN_ABORT_STATUS) {
  918. /*
  919. * The scan abort will return 1 for success or
  920. * 2 for "failure". A failure condition can be
  921. * due to simply not being in an active scan which
  922. * can occur if we send the scan abort before the
  923. * microcode has notified us that a scan is completed.
  924. */
  925. IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
  926. ret = -ENOENT;
  927. }
  928. return ret;
  929. }
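/*
 * Stop an offloaded scan, regular or scheduled. On UMAC firmware this
 * delegates to iwl_umac_scan_stop(); otherwise send the abort command,
 * wait up to one second for SCAN_OFFLOAD_COMPLETE, clear
 * mvm->scan_status and, if requested, notify mac80211.
 */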
  930. int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
  931. {
  932. int ret;
  933. struct iwl_notification_wait wait_scan_done;
  934. static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
  935. bool sched = mvm->scan_status == IWL_MVM_SCAN_SCHED;
  936. lockdep_assert_held(&mvm->mutex);
  937. if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
  938. return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
  939. notify);
  940. if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
  941. (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
  942. mvm->scan_status != IWL_MVM_SCAN_OS)) {
  943. IWL_DEBUG_SCAN(mvm, "No scan to stop\n");
  944. return 0;
  945. }
  946. iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
  947. scan_done_notif,
  948. ARRAY_SIZE(scan_done_notif),
  949. NULL, NULL);
  950. ret = iwl_mvm_send_scan_offload_abort(mvm);
  951. if (ret) {
  952. IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
  953. sched ? "offloaded " : "", ret);
  954. iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
  955. return ret;
  956. }
  957. IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
  958. sched ? "offloaded " : "");
  959. ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
  960. if (ret)
  961. return ret;
  962. /*
  963. * Clear the scan status so the next scan requests will succeed. This
  964. * also ensures the Rx handler doesn't do anything, as the scan was
  965. * stopped from above. Since the rx handler won't do anything now,
  966. * we have to release the scan reference here.
  967. */
  968. if (mvm->scan_status == IWL_MVM_SCAN_OS)
  969. iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
  970. mvm->scan_status = IWL_MVM_SCAN_NONE;
  971. if (notify) {
  972. if (sched)
  973. ieee80211_sched_scan_stopped(mvm->hw);
  974. else
  975. ieee80211_scan_completed(mvm->hw, true);
  976. }
  977. return 0;
  978. }
  979. static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
  980. struct iwl_scan_req_tx_cmd *tx_cmd,
  981. bool no_cck)
  982. {
  983. tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
  984. TX_CMD_FLG_BT_DIS);
  985. tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
  986. IEEE80211_BAND_2GHZ,
  987. no_cck);
  988. tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
  989. tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
  990. TX_CMD_FLG_BT_DIS);
  991. tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
  992. IEEE80211_BAND_5GHZ,
  993. no_cck);
  994. tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
  995. }
  996. static void
  997. iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
  998. struct ieee80211_channel **channels,
  999. int n_channels, u32 ssid_bitmap,
  1000. struct iwl_scan_req_unified_lmac *cmd)
  1001. {
  1002. struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
  1003. int i;
  1004. for (i = 0; i < n_channels; i++) {
  1005. channel_cfg[i].channel_num =
  1006. cpu_to_le16(channels[i]->hw_value);
  1007. channel_cfg[i].iter_count = cpu_to_le16(1);
  1008. channel_cfg[i].iter_interval = 0;
  1009. channel_cfg[i].flags =
  1010. cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
  1011. ssid_bitmap);
  1012. }
  1013. }
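/*
 * Copy the given probe request IEs; when an RRM scan is needed, insert
 * a DS Parameter Set placeholder (channel byte left 0) after the
 * SSID/rates IEs so it sits in standard IE order. The actual channel
 * number is presumably filled in per channel by the firmware.
 */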
  1014. static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
  1015. size_t len, u8 *const pos)
  1016. {
  1017. static const u8 before_ds_params[] = {
  1018. WLAN_EID_SSID,
  1019. WLAN_EID_SUPP_RATES,
  1020. WLAN_EID_REQUEST,
  1021. WLAN_EID_EXT_SUPP_RATES,
  1022. };
  1023. size_t offs;
  1024. u8 *newpos = pos;
  1025. if (!iwl_mvm_rrm_scan_needed(mvm)) {
  1026. memcpy(newpos, ies, len);
  1027. return newpos + len;
  1028. }
  1029. offs = ieee80211_ie_split(ies, len,
  1030. before_ds_params,
  1031. ARRAY_SIZE(before_ds_params),
  1032. 0);
  1033. memcpy(newpos, ies, offs);
  1034. newpos += offs;
  1035. /* Add a placeholder for DS Parameter Set element */
  1036. *newpos++ = WLAN_EID_DS_PARAMS;
  1037. *newpos++ = 1;
  1038. *newpos++ = 0;
  1039. memcpy(newpos, ies + offs, len - offs);
  1040. newpos += len - offs;
  1041. return newpos;
  1042. }
  1043. static void
  1044. iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
  1045. struct ieee80211_scan_ies *ies,
  1046. struct iwl_scan_probe_req *preq,
  1047. const u8 *mac_addr, const u8 *mac_addr_mask)
  1048. {
  1049. struct ieee80211_mgmt *frame = (struct ieee80211_mgmt *)preq->buf;
  1050. u8 *pos, *newpos;
  1051. /*
  1052. * Unfortunately, right now the offload scan doesn't support randomising
  1053. * within the firmware, so until the firmware API is ready we implement
  1054. * it in the driver. This means that the scan iterations won't really be
  1055. * random, only when it's restarted, but at least that helps a bit.
  1056. */
  1057. if (mac_addr)
  1058. get_random_mask_addr(frame->sa, mac_addr, mac_addr_mask);
  1059. else
  1060. memcpy(frame->sa, vif->addr, ETH_ALEN);
  1061. frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
  1062. eth_broadcast_addr(frame->da);
  1063. eth_broadcast_addr(frame->bssid);
  1064. frame->seq_ctrl = 0;
  1065. pos = frame->u.probe_req.variable;
  1066. *pos++ = WLAN_EID_SSID;
  1067. *pos++ = 0;
  1068. preq->mac_header.offset = 0;
  1069. preq->mac_header.len = cpu_to_le16(24 + 2);
  1070. /* Insert ds parameter set element on 2.4 GHz band */
  1071. newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
  1072. ies->ies[IEEE80211_BAND_2GHZ],
  1073. ies->len[IEEE80211_BAND_2GHZ],
  1074. pos);
  1075. preq->band_data[0].offset = cpu_to_le16(pos - preq->buf);
  1076. preq->band_data[0].len = cpu_to_le16(newpos - pos);
  1077. pos = newpos;
  1078. memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
  1079. ies->len[IEEE80211_BAND_5GHZ]);
  1080. preq->band_data[1].offset = cpu_to_le16(pos - preq->buf);
  1081. preq->band_data[1].len = cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
  1082. pos += ies->len[IEEE80211_BAND_5GHZ];
  1083. memcpy(pos, ies->common_ies, ies->common_ie_len);
  1084. preq->common_data.offset = cpu_to_le16(pos - preq->buf);
  1085. preq->common_data.len = cpu_to_le16(ies->common_ie_len);
  1086. }
  1087. static void
  1088. iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
  1089. struct iwl_scan_req_unified_lmac *cmd,
  1090. struct iwl_mvm_scan_params *params)
  1091. {
  1092. memset(cmd, 0, ksize(cmd));
  1093. cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
  1094. cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
  1095. if (params->passive_fragmented)
  1096. cmd->fragmented_dwell =
  1097. params->dwell[IEEE80211_BAND_2GHZ].passive;
  1098. cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
  1099. cmd->max_out_time = cpu_to_le32(params->max_out_time);
  1100. cmd->suspend_time = cpu_to_le32(params->suspend_time);
  1101. cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
  1102. cmd->iter_num = cpu_to_le32(1);
  1103. if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
  1104. mvm->last_ebs_successful) {
  1105. cmd->channel_opt[0].flags =
  1106. cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
  1107. IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
  1108. IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
  1109. cmd->channel_opt[1].flags =
  1110. cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
  1111. IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
  1112. IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
  1113. }
  1114. if (iwl_mvm_rrm_scan_needed(mvm))
  1115. cmd->scan_flags |=
  1116. cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
  1117. }
  1118. int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
  1119. struct ieee80211_vif *vif,
  1120. struct ieee80211_scan_request *req)
  1121. {
  1122. struct iwl_host_cmd hcmd = {
  1123. .id = SCAN_OFFLOAD_REQUEST_CMD,
  1124. .len = { sizeof(struct iwl_scan_req_unified_lmac) +
  1125. sizeof(struct iwl_scan_channel_cfg_lmac) *
  1126. mvm->fw->ucode_capa.n_scan_channels +
  1127. sizeof(struct iwl_scan_probe_req), },
  1128. .data = { mvm->scan_cmd, },
  1129. .dataflags = { IWL_HCMD_DFL_NOCOPY, },
  1130. };
  1131. struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
  1132. struct iwl_scan_probe_req *preq;
  1133. struct iwl_mvm_scan_params params = {};
  1134. u32 flags;
  1135. u32 ssid_bitmap = 0;
  1136. int ret, i;
  1137. lockdep_assert_held(&mvm->mutex);
  1138. /* we should have failed registration if scan_cmd was NULL */
  1139. if (WARN_ON(mvm->scan_cmd == NULL))
  1140. return -ENOMEM;
  1141. if (req->req.n_ssids > PROBE_OPTION_MAX ||
  1142. req->ies.common_ie_len + req->ies.len[NL80211_BAND_2GHZ] +
  1143. req->ies.len[NL80211_BAND_5GHZ] >
  1144. iwl_mvm_max_scan_ie_fw_cmd_room(mvm, false) ||
  1145. req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
  1146. return -ENOBUFS;
  1147. mvm->scan_status = IWL_MVM_SCAN_OS;
  1148. iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
  1149. &params);
  1150. iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);
  1151. cmd->n_channels = (u8)req->req.n_channels;
  1152. flags = IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
  1153. if (req->req.n_ssids == 1 && req->req.ssids[0].ssid_len != 0)
  1154. flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
  1155. if (params.passive_fragmented)
  1156. flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
  1157. if (req->req.n_ssids == 0)
  1158. flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
  1159. cmd->scan_flags |= cpu_to_le32(flags);
  1160. cmd->flags = iwl_mvm_scan_rxon_flags(req->req.channels[0]->band);
  1161. cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
  1162. MAC_FILTER_IN_BEACON);
  1163. iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, req->req.no_cck);
  1164. iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->req.ssids,
  1165. req->req.n_ssids, 0);
  1166. cmd->schedule[0].delay = 0;
  1167. cmd->schedule[0].iterations = 1;
  1168. cmd->schedule[0].full_scan_mul = 0;
  1169. cmd->schedule[1].delay = 0;
  1170. cmd->schedule[1].iterations = 0;
  1171. cmd->schedule[1].full_scan_mul = 0;
  1172. for (i = 1; i <= req->req.n_ssids; i++)
  1173. ssid_bitmap |= BIT(i);
  1174. iwl_mvm_lmac_scan_cfg_channels(mvm, req->req.channels,
  1175. req->req.n_channels, ssid_bitmap,
  1176. cmd);
  1177. preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
  1178. mvm->fw->ucode_capa.n_scan_channels);
  1179. iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, preq,
  1180. req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
  1181. req->req.mac_addr : NULL,
  1182. req->req.mac_addr_mask);
  1183. ret = iwl_mvm_send_cmd(mvm, &hcmd);
  1184. if (!ret) {
  1185. IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
  1186. } else {
  1187. /*
  1188. * If the scan failed, it usually means that the FW was unable
  1189. * to allocate the time events. Warn on it, but maybe we
  1190. * should try to send the command again with different params.
  1191. */
  1192. IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
  1193. mvm->scan_status = IWL_MVM_SCAN_NONE;
  1194. ret = -EIO;
  1195. }
  1196. return ret;
  1197. }

int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    struct cfg80211_sched_scan_request *req,
				    struct ieee80211_scan_ies *ies)
{
	struct iwl_host_cmd hcmd = {
		.id = SCAN_OFFLOAD_REQUEST_CMD,
		.len = { sizeof(struct iwl_scan_req_unified_lmac) +
			 sizeof(struct iwl_scan_channel_cfg_lmac) *
				mvm->fw->ucode_capa.n_scan_channels +
			 sizeof(struct iwl_scan_probe_req), },
		.data = { mvm->scan_cmd, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
	struct iwl_scan_probe_req *preq;
	struct iwl_mvm_scan_params params = {};
	int ret;
	u32 flags = 0, ssid_bitmap = 0;

	lockdep_assert_held(&mvm->mutex);

	/* we should have failed registration if scan_cmd was NULL */
	if (WARN_ON(mvm->scan_cmd == NULL))
		return -ENOMEM;

	if (req->n_ssids > PROBE_OPTION_MAX ||
	    ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
	    ies->len[NL80211_BAND_5GHZ] >
	    iwl_mvm_max_scan_ie_fw_cmd_room(mvm, true) ||
	    req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
		return -ENOBUFS;

	iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);

	iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);

	cmd->n_channels = (u8)req->n_channels;

	if (iwl_mvm_scan_pass_all(mvm, req))
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
	else
		flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;

	if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;

	if (params.passive_fragmented)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;

	if (req->n_ssids == 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;

	cmd->scan_flags |= cpu_to_le32(flags);

	cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
					MAC_FILTER_IN_BEACON);
	iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, false);
	iwl_scan_offload_build_ssid(req, cmd->direct_scan, &ssid_bitmap, false);

	cmd->schedule[0].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
	cmd->schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
	cmd->schedule[0].full_scan_mul = 1;

	cmd->schedule[1].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
	cmd->schedule[1].iterations = 0xff;
	cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;

	iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
				       ssid_bitmap, cmd);

	preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
			mvm->fw->ucode_capa.n_scan_channels);

	iwl_mvm_build_unified_scan_probe(mvm, vif, ies, preq,
		req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
			req->mac_addr : NULL,
		req->mac_addr_mask);

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
	if (!ret) {
		IWL_DEBUG_SCAN(mvm,
			       "Sched scan request was sent successfully\n");
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		ret = -EIO;
	}
	return ret;
}
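
/*
 * Cancel whichever scan is currently active, dispatching to the UMAC,
 * LMAC or legacy stop path depending on the firmware capabilities. When
 * the radio is killed, just notify mac80211 and clear the local state.
 */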

int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
{
	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
		return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_REG_SCAN,
					  true);

	if (mvm->scan_status == IWL_MVM_SCAN_NONE)
		return 0;

	if (iwl_mvm_is_radio_killed(mvm)) {
		ieee80211_scan_completed(mvm->hw, true);
		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		return 0;
	}

	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
		return iwl_mvm_scan_offload_stop(mvm, true);

	return iwl_mvm_cancel_regular_scan(mvm);
}

/* UMAC scan API */

struct iwl_umac_scan_done {
	struct iwl_mvm *mvm;
	enum iwl_umac_scan_uid_type type;
};

static int rate_to_scan_rate_flag(unsigned int rate)
{
	static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
		[IWL_RATE_1M_INDEX]	= SCAN_CONFIG_RATE_1M,
		[IWL_RATE_2M_INDEX]	= SCAN_CONFIG_RATE_2M,
		[IWL_RATE_5M_INDEX]	= SCAN_CONFIG_RATE_5M,
		[IWL_RATE_11M_INDEX]	= SCAN_CONFIG_RATE_11M,
		[IWL_RATE_6M_INDEX]	= SCAN_CONFIG_RATE_6M,
		[IWL_RATE_9M_INDEX]	= SCAN_CONFIG_RATE_9M,
		[IWL_RATE_12M_INDEX]	= SCAN_CONFIG_RATE_12M,
		[IWL_RATE_18M_INDEX]	= SCAN_CONFIG_RATE_18M,
		[IWL_RATE_24M_INDEX]	= SCAN_CONFIG_RATE_24M,
		[IWL_RATE_36M_INDEX]	= SCAN_CONFIG_RATE_36M,
		[IWL_RATE_48M_INDEX]	= SCAN_CONFIG_RATE_48M,
		[IWL_RATE_54M_INDEX]	= SCAN_CONFIG_RATE_54M,
	};

	return rate_to_scan_rate[rate];
}

static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
{
	struct ieee80211_supported_band *band;
	unsigned int rates = 0;
	int i;

	band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
	for (i = 0; i < band->n_bitrates; i++)
		rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
	band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
	for (i = 0; i < band->n_bitrates; i++)
		rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);

	/* Set both basic rates and supported rates */
	rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);

	return cpu_to_le32(rates);
}
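
/*
 * Send the UMAC scan configuration (SCAN_CFG_CMD): TX/RX chains, legacy
 * rates, dwell and out-of-channel times, the MAC address used for
 * scanning, and the full 2.4/5 GHz channel list from the NVM data.
 */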

int iwl_mvm_config_scan(struct iwl_mvm *mvm)
{
	struct iwl_scan_config *scan_config;
	struct ieee80211_supported_band *band;
	int num_channels =
		mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
		mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
	int ret, i, j = 0, cmd_size, data_size;
	struct iwl_host_cmd cmd = {
		.id = SCAN_CFG_CMD,
	};

	if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
		return -ENOBUFS;

	cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;

	scan_config = kzalloc(cmd_size, GFP_KERNEL);
	if (!scan_config)
		return -ENOMEM;

	data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr);
	scan_config->hdr.size = cpu_to_le16(data_size);
	scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
					 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
					 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
					 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
					 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
					 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
					 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
					 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
					 SCAN_CONFIG_N_CHANNELS(num_channels));
	scan_config->tx_chains = cpu_to_le32(mvm->fw->valid_tx_ant);
	scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
	scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
	scan_config->out_of_channel_time = cpu_to_le32(170);
	scan_config->suspend_time = cpu_to_le32(30);
	scan_config->dwell_active = 20;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 20;

	memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);

	scan_config->bcast_sta_id = mvm->aux_sta.sta_id;
	scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS |
				     IWL_CHANNEL_FLAG_ACCURATE_EBS |
				     IWL_CHANNEL_FLAG_EBS_ADD |
				     IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

	band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
	for (i = 0; i < band->n_channels; i++, j++)
		scan_config->channel_array[j] = band->channels[i].center_freq;
	band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
	for (i = 0; i < band->n_channels; i++, j++)
		scan_config->channel_array[j] = band->channels[i].center_freq;

	cmd.data[0] = scan_config;
	cmd.len[0] = cmd_size;
	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;

	IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");

	ret = iwl_mvm_send_cmd(mvm, &cmd);

	kfree(scan_config);
	return ret;
}
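
/*
 * Look up a scan UID in mvm->scan_uid[]. Returns its index, or
 * IWL_MVM_MAX_SIMULTANEOUS_SCANS if the UID is not found.
 */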

static int iwl_mvm_find_scan_uid(struct iwl_mvm *mvm, u32 uid)
{
	int i;

	for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
		if (mvm->scan_uid[i] == uid)
			return i;

	return i;
}

static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm)
{
	return iwl_mvm_find_scan_uid(mvm, 0);
}

static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm,
				   enum iwl_umac_scan_uid_type type)
{
	int i;

	for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
		if (mvm->scan_uid[i] & type)
			return true;

	return false;
}
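
/*
 * Generate a unique scan UID: the scan type in the low bits plus an
 * incrementing sequence number, retried until the resulting UID is not
 * already in use in mvm->scan_uid[].
 */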

static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm,
				 enum iwl_umac_scan_uid_type type)
{
	u32 uid;

	/* make sure exactly one bit is on in scan type */
	WARN_ON(hweight8(type) != 1);

	/*
	 * Make sure scan uids are unique. If one scan lasts a long time while
	 * others complete frequently, the seq number will wrap around and we
	 * may end up with more than one scan with the same uid.
	 */
	do {
		uid = type | (mvm->scan_seq_num <<
			      IWL_UMAC_SCAN_UID_SEQ_OFFSET);
		mvm->scan_seq_num++;
	} while (iwl_mvm_find_scan_uid(mvm, uid) <
		 IWL_MVM_MAX_SIMULTANEOUS_SCANS);
	IWL_DEBUG_SCAN(mvm, "Generated scan UID %u\n", uid);

	return uid;
}

static void
iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
				    struct iwl_scan_req_umac *cmd,
				    struct iwl_mvm_scan_params *params)
{
	memset(cmd, 0, ksize(cmd));
	cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
				    sizeof(struct iwl_mvm_umac_cmd_hdr));
	cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
	cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
	if (params->passive_fragmented)
		cmd->fragmented_dwell =
			params->dwell[IEEE80211_BAND_2GHZ].passive;
	cmd->max_out_time = cpu_to_le32(params->max_out_time);
	cmd->suspend_time = cpu_to_le32(params->suspend_time);
	cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
}

static void
iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
			       struct ieee80211_channel **channels,
			       int n_channels, u32 ssid_bitmap,
			       struct iwl_scan_req_umac *cmd)
{
	struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data;
	int i;

	for (i = 0; i < n_channels; i++) {
		channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
		channel_cfg[i].channel_num = channels[i]->hw_value;
		channel_cfg[i].iter_count = 1;
		channel_cfg[i].iter_interval = 0;
	}
}

static u32 iwl_mvm_scan_umac_common_flags(struct iwl_mvm *mvm, int n_ssids,
					  struct cfg80211_ssid *ssids,
					  int fragmented)
{
	int flags = 0;

	if (n_ssids == 0)
		flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;

	if (n_ssids == 1 && ssids[0].ssid_len != 0)
		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;

	if (fragmented)
		flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;

	if (iwl_mvm_rrm_scan_needed(mvm))
		flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;

	return flags;
}
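
/*
 * Build and send a regular scan request over the UMAC API: allocate a
 * free UID slot, fill the generic UMAC scan command and the channel
 * config, then the tail section (schedule, probe request template and
 * direct-scan SSIDs).
 */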

int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		      struct ieee80211_scan_request *req)
{
	struct iwl_host_cmd hcmd = {
		.id = SCAN_REQ_UMAC,
		.len = { iwl_mvm_scan_size(mvm), },
		.data = { mvm->scan_cmd, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
	struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
		sizeof(struct iwl_scan_channel_cfg_umac) *
			mvm->fw->ucode_capa.n_scan_channels;
	struct iwl_mvm_scan_params params = {};
	u32 uid, flags;
	u32 ssid_bitmap = 0;
	int ret, i, uid_idx;

	lockdep_assert_held(&mvm->mutex);

	uid_idx = iwl_mvm_find_free_scan_uid(mvm);
	if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
		return -EBUSY;

	/* we should have failed registration if scan_cmd was NULL */
	if (WARN_ON(mvm->scan_cmd == NULL))
		return -ENOMEM;

	if (WARN_ON(req->req.n_ssids > PROBE_OPTION_MAX ||
		    req->ies.common_ie_len +
		    req->ies.len[NL80211_BAND_2GHZ] +
		    req->ies.len[NL80211_BAND_5GHZ] + 24 + 2 >
		    SCAN_OFFLOAD_PROBE_REQ_SIZE || req->req.n_channels >
		    mvm->fw->ucode_capa.n_scan_channels))
		return -ENOBUFS;

	iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
				 &params);

	iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);

	uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
	mvm->scan_uid[uid_idx] = uid;
	cmd->uid = cpu_to_le32(uid);

	cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);

	flags = iwl_mvm_scan_umac_common_flags(mvm, req->req.n_ssids,
					       req->req.ssids,
					       params.passive_fragmented);

	flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;

	cmd->general_flags = cpu_to_le32(flags);
	cmd->n_channels = req->req.n_channels;

	for (i = 0; i < req->req.n_ssids; i++)
		ssid_bitmap |= BIT(i);

	iwl_mvm_umac_scan_cfg_channels(mvm, req->req.channels,
				       req->req.n_channels, ssid_bitmap, cmd);

	sec_part->schedule[0].iter_count = 1;
	sec_part->delay = 0;

	iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, &sec_part->preq,
		req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
			req->req.mac_addr : NULL,
		req->req.mac_addr_mask);

	iwl_mvm_scan_fill_ssids(sec_part->direct_scan, req->req.ssids,
				req->req.n_ssids, 0);

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
	if (!ret) {
		IWL_DEBUG_SCAN(mvm,
			       "Scan request was sent successfully\n");
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
	}
	return ret;
}
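
/*
 * Build and send a scheduled scan request over the UMAC API. Unlike the
 * regular scan, it is periodic, may use match sets instead of pass-all,
 * and enables the EBS channel flags when the last EBS attempt succeeded.
 */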

int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct cfg80211_sched_scan_request *req,
			    struct ieee80211_scan_ies *ies)
{
	struct iwl_host_cmd hcmd = {
		.id = SCAN_REQ_UMAC,
		.len = { iwl_mvm_scan_size(mvm), },
		.data = { mvm->scan_cmd, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
	struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
		sizeof(struct iwl_scan_channel_cfg_umac) *
			mvm->fw->ucode_capa.n_scan_channels;
	struct iwl_mvm_scan_params params = {};
	u32 uid, flags;
	u32 ssid_bitmap = 0;
	int ret, uid_idx;

	lockdep_assert_held(&mvm->mutex);

	uid_idx = iwl_mvm_find_free_scan_uid(mvm);
	if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
		return -EBUSY;

	/* we should have failed registration if scan_cmd was NULL */
	if (WARN_ON(mvm->scan_cmd == NULL))
		return -ENOMEM;

	if (WARN_ON(req->n_ssids > PROBE_OPTION_MAX ||
		    ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
		    ies->len[NL80211_BAND_5GHZ] + 24 + 2 >
		    SCAN_OFFLOAD_PROBE_REQ_SIZE || req->n_channels >
		    mvm->fw->ucode_capa.n_scan_channels))
		return -ENOBUFS;

	iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags,
				 &params);

	iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);

	cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);

	uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN);
	mvm->scan_uid[uid_idx] = uid;
	cmd->uid = cpu_to_le32(uid);

	cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_LOW);

	flags = iwl_mvm_scan_umac_common_flags(mvm, req->n_ssids, req->ssids,
					       params.passive_fragmented);

	flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;

	if (iwl_mvm_scan_pass_all(mvm, req))
		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
	else
		flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;

	cmd->general_flags = cpu_to_le32(flags);

	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
	    mvm->last_ebs_successful)
		cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
				     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				     IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;

	cmd->n_channels = req->n_channels;

	iwl_scan_offload_build_ssid(req, sec_part->direct_scan, &ssid_bitmap,
				    false);

	/* This API uses bits 0-19 instead of 1-20. */
	ssid_bitmap = ssid_bitmap >> 1;

	iwl_mvm_umac_scan_cfg_channels(mvm, req->channels, req->n_channels,
				       ssid_bitmap, cmd);

	sec_part->schedule[0].interval =
				cpu_to_le16(req->interval / MSEC_PER_SEC);
	sec_part->schedule[0].iter_count = 0xff;

	sec_part->delay = 0;

	iwl_mvm_build_unified_scan_probe(mvm, vif, ies, &sec_part->preq,
		req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
			req->mac_addr : NULL,
		req->mac_addr_mask);

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
	if (!ret) {
		IWL_DEBUG_SCAN(mvm,
			       "Sched scan request was sent successfully\n");
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
	}
	return ret;
}
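
/*
 * Handle the UMAC scan complete notification: clear the UID slot, record
 * the EBS status and notify mac80211 that the regular scan completed or,
 * for sched scans, that scanning stopped once no sched scan UID is left.
 */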

int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
					struct iwl_rx_cmd_buffer *rxb,
					struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_umac_scan_complete *notif = (void *)pkt->data;
	u32 uid = __le32_to_cpu(notif->uid);
	bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN);
	int uid_idx = iwl_mvm_find_scan_uid(mvm, uid);

	/*
	 * The scan uid may have been set to zero by a scan abort request
	 * from above.
	 */
	if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
		return 0;

	IWL_DEBUG_SCAN(mvm,
		       "Scan completed, uid %u type %s, status %s, EBS status %s\n",
		       uid, sched ? "sched" : "regular",
		       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
				"completed" : "aborted",
		       notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
				"success" : "failed");

	mvm->last_ebs_successful = !notif->ebs_status;
	mvm->scan_uid[uid_idx] = 0;

	if (!sched) {
		ieee80211_scan_completed(mvm->hw,
					 notif->status ==
						IWL_SCAN_OFFLOAD_ABORTED);
		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
	} else if (!iwl_mvm_find_scan_type(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN)) {
		ieee80211_sched_scan_stopped(mvm->hw);
	} else {
		IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n");
	}

	return 0;
}

static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait,
				     struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_umac_scan_done *scan_done = data;
	struct iwl_umac_scan_complete *notif = (void *)pkt->data;
	u32 uid = __le32_to_cpu(notif->uid);
	int uid_idx = iwl_mvm_find_scan_uid(scan_done->mvm, uid);

	if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC))
		return false;

	if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
		return false;

	/*
	 * Clear the scan uid of scans that were aborted from above and have
	 * completed in the FW, so the RX handler does nothing.
	 */
	scan_done->mvm->scan_uid[uid_idx] = 0;

	return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type);
}

static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid)
{
	struct iwl_umac_scan_abort cmd = {
		.hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
					sizeof(struct iwl_mvm_umac_cmd_hdr)),
		.uid = cpu_to_le32(uid),
	};

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);

	return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
}
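
/*
 * Abort all UMAC scans of the given type and wait up to one second for
 * the matching SCAN_COMPLETE_UMAC notifications, optionally notifying
 * mac80211 once the scans are stopped.
 */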

static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
			      enum iwl_umac_scan_uid_type type, bool notify)
{
	struct iwl_notification_wait wait_scan_done;
	static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, };
	struct iwl_umac_scan_done scan_done = {
		.mvm = mvm,
		.type = type,
	};
	int i, ret = -EIO;

	iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
				   scan_done_notif,
				   ARRAY_SIZE(scan_done_notif),
				   iwl_scan_umac_done_check, &scan_done);

	IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);

	for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
		if (mvm->scan_uid[i] & type) {
			int err;

			if (iwl_mvm_is_radio_killed(mvm) &&
			    (type & IWL_UMAC_SCAN_UID_REG_SCAN)) {
				ieee80211_scan_completed(mvm->hw, true);
				iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
				break;
			}

			err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]);
			if (!err)
				ret = 0;
		}
	}

	if (ret) {
		IWL_DEBUG_SCAN(mvm, "Couldn't stop scan\n");
		iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
		return ret;
	}

	ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
	if (ret)
		return ret;

	if (notify) {
		if (type & IWL_UMAC_SCAN_UID_SCHED_SCAN)
			ieee80211_sched_scan_stopped(mvm->hw);
		if (type & IWL_UMAC_SCAN_UID_REG_SCAN) {
			ieee80211_scan_completed(mvm->hw, true);
			iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
		}
	}

	return ret;
}
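
/*
 * Worst-case scan command buffer size for the current firmware: UMAC,
 * unified LMAC or legacy layout, each sized for
 * ucode_capa.n_scan_channels channels.
 */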

int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
		return sizeof(struct iwl_scan_req_umac) +
			sizeof(struct iwl_scan_channel_cfg_umac) *
				mvm->fw->ucode_capa.n_scan_channels +
			sizeof(struct iwl_scan_req_umac_tail);

	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
		return sizeof(struct iwl_scan_req_unified_lmac) +
			sizeof(struct iwl_scan_channel_cfg_lmac) *
				mvm->fw->ucode_capa.n_scan_channels +
			sizeof(struct iwl_scan_probe_req);

	return sizeof(struct iwl_scan_cmd) +
		mvm->fw->ucode_capa.max_probe_length +
		mvm->fw->ucode_capa.n_scan_channels *
		sizeof(struct iwl_scan_channel);
}