  1. /*
  2. * Copyright (c) 2008-2011 Atheros Communications Inc.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/nl80211.h>
  17. #include <linux/delay.h>
  18. #include "ath9k.h"
  19. #include "btcoex.h"
  20. u8 ath9k_parse_mpdudensity(u8 mpdudensity)
  21. {
  22. /*
  23. * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
  24. * 0 for no restriction
  25. * 1 for 1/4 us
  26. * 2 for 1/2 us
  27. * 3 for 1 us
  28. * 4 for 2 us
  29. * 5 for 4 us
  30. * 6 for 8 us
  31. * 7 for 16 us
  32. */
  33. switch (mpdudensity) {
  34. case 0:
  35. return 0;
  36. case 1:
  37. case 2:
  38. case 3:
  39. /* Our lower layer calculations limit our precision to
  40. 1 microsecond */
  41. return 1;
  42. case 4:
  43. return 2;
  44. case 5:
  45. return 4;
  46. case 6:
  47. return 8;
  48. case 7:
  49. return 16;
  50. default:
  51. return 0;
  52. }
  53. }
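/*
 * Check whether a TX queue still holds frames: the hardware queue depth is
 * always checked, and with sw_pending set the per-channel-context software
 * queue for the corresponding mac80211 queue is checked as well.
 */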
  54. static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq,
  55. bool sw_pending)
  56. {
  57. bool pending = false;
  58. spin_lock_bh(&txq->axq_lock);
  59. if (txq->axq_depth) {
  60. pending = true;
  61. goto out;
  62. }
  63. if (!sw_pending)
  64. goto out;
  65. if (txq->mac80211_qnum >= 0) {
  66. struct list_head *list;
  67. list = &sc->cur_chan->acq[txq->mac80211_qnum];
  68. if (!list_empty(list))
  69. pending = true;
  70. }
  71. out:
  72. spin_unlock_bh(&txq->axq_lock);
  73. return pending;
  74. }
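/* Set the chip power mode while holding sc_pm_lock. */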
  75. static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
  76. {
  77. unsigned long flags;
  78. bool ret;
  79. spin_lock_irqsave(&sc->sc_pm_lock, flags);
  80. ret = ath9k_hw_setpower(sc->sc_ah, mode);
  81. spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
  82. return ret;
  83. }
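/*
 * Sleep timer callback: take a final cycle-counter snapshot, abort RX and
 * stop RX DMA, then put the chip into full sleep.
 */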
  84. void ath_ps_full_sleep(unsigned long data)
  85. {
  86. struct ath_softc *sc = (struct ath_softc *) data;
  87. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  88. bool reset;
  89. spin_lock(&common->cc_lock);
  90. ath_hw_cycle_counters_update(common);
  91. spin_unlock(&common->cc_lock);
  92. ath9k_hw_setrxabort(sc->sc_ah, 1);
  93. ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
  94. ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
  95. }
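/*
 * Take a power-save reference and wake the hardware on the first reference.
 * The cycle counters are cleared after waking up, since they contain no
 * useful data while the chip is asleep.
 */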
  96. void ath9k_ps_wakeup(struct ath_softc *sc)
  97. {
  98. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  99. unsigned long flags;
  100. enum ath9k_power_mode power_mode;
  101. spin_lock_irqsave(&sc->sc_pm_lock, flags);
  102. if (++sc->ps_usecount != 1)
  103. goto unlock;
  104. del_timer_sync(&sc->sleep_timer);
  105. power_mode = sc->sc_ah->power_mode;
  106. ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
  107. /*
  108. * While the hardware is asleep, the cycle counters contain no
  109. * useful data. Better clear them now so that they don't mess up
  110. * survey data results.
  111. */
  112. if (power_mode != ATH9K_PM_AWAKE) {
  113. spin_lock(&common->cc_lock);
  114. ath_hw_cycle_counters_update(common);
  115. memset(&common->cc_survey, 0, sizeof(common->cc_survey));
  116. memset(&common->cc_ani, 0, sizeof(common->cc_ani));
  117. spin_unlock(&common->cc_lock);
  118. }
  119. unlock:
  120. spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
  121. }
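/*
 * Drop a power-save reference. When the last reference is released, either
 * arm the full-sleep timer (idle case) or enter network sleep, provided PS
 * is enabled and no beacon/CAB/PS-Poll/TX-ACK/ANI wait flags are set.
 */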
  122. void ath9k_ps_restore(struct ath_softc *sc)
  123. {
  124. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  125. enum ath9k_power_mode mode;
  126. unsigned long flags;
  127. spin_lock_irqsave(&sc->sc_pm_lock, flags);
  128. if (--sc->ps_usecount != 0)
  129. goto unlock;
  130. if (sc->ps_idle) {
  131. mod_timer(&sc->sleep_timer, jiffies + HZ / 10);
  132. goto unlock;
  133. }
  134. if (sc->ps_enabled &&
  135. !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
  136. PS_WAIT_FOR_CAB |
  137. PS_WAIT_FOR_PSPOLL_DATA |
  138. PS_WAIT_FOR_TX_ACK |
  139. PS_WAIT_FOR_ANI))) {
  140. mode = ATH9K_PM_NETWORK_SLEEP;
  141. if (ath9k_hw_btcoex_is_enabled(sc->sc_ah))
  142. ath9k_btcoex_stop_gen_timer(sc);
  143. } else {
  144. goto unlock;
  145. }
  146. spin_lock(&common->cc_lock);
  147. ath_hw_cycle_counters_update(common);
  148. spin_unlock(&common->cc_lock);
  149. ath9k_hw_setpower(sc->sc_ah, mode);
  150. unlock:
  151. spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
  152. }
  153. static void __ath_cancel_work(struct ath_softc *sc)
  154. {
  155. cancel_work_sync(&sc->paprd_work);
  156. cancel_delayed_work_sync(&sc->tx_complete_work);
  157. cancel_delayed_work_sync(&sc->hw_pll_work);
  158. #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
  159. if (ath9k_hw_mci_is_enabled(sc->sc_ah))
  160. cancel_work_sync(&sc->mci_work);
  161. #endif
  162. }
  163. void ath_cancel_work(struct ath_softc *sc)
  164. {
  165. __ath_cancel_work(sc);
  166. cancel_work_sync(&sc->hw_reset_work);
  167. }
  168. void ath_restart_work(struct ath_softc *sc)
  169. {
  170. ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
  171. if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
  172. ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
  173. msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
  174. ath_start_ani(sc);
  175. }
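/*
 * Quiesce the driver ahead of a chip reset: stop the mac80211 queues and
 * ANI, disable interrupts and drain the RX/TX DMA engines. Returns false
 * if DMA could not be stopped cleanly.
 */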
  176. static bool ath_prepare_reset(struct ath_softc *sc)
  177. {
  178. struct ath_hw *ah = sc->sc_ah;
  179. bool ret = true;
  180. ieee80211_stop_queues(sc->hw);
  181. ath_stop_ani(sc);
  182. ath9k_hw_disable_interrupts(ah);
  183. if (AR_SREV_9300_20_OR_LATER(ah)) {
  184. ret &= ath_stoprecv(sc);
  185. ret &= ath_drain_all_txq(sc);
  186. } else {
  187. ret &= ath_drain_all_txq(sc);
  188. ret &= ath_stoprecv(sc);
  189. }
  190. return ret;
  191. }
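/*
 * Bring the hardware back up after a reset: recompute the summary state,
 * restart RX, restore TX power and the per-context TSF, re-arm beacons and
 * periodic work, re-enable interrupts and wake the mac80211 queues.
 */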
  192. static bool ath_complete_reset(struct ath_softc *sc, bool start)
  193. {
  194. struct ath_hw *ah = sc->sc_ah;
  195. struct ath_common *common = ath9k_hw_common(ah);
  196. unsigned long flags;
  197. ath9k_calculate_summary_state(sc, sc->cur_chan);
  198. ath_startrecv(sc);
  199. ath9k_cmn_update_txpow(ah, sc->cur_chan->cur_txpower,
  200. sc->cur_chan->txpower,
  201. &sc->cur_chan->cur_txpower);
  202. clear_bit(ATH_OP_HW_RESET, &common->op_flags);
  203. if (!sc->cur_chan->offchannel && start) {
  204. /* restore per chanctx TSF timer */
  205. if (sc->cur_chan->tsf_val) {
  206. u32 offset;
  207. offset = ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts,
  208. NULL);
  209. ath9k_hw_settsf64(ah, sc->cur_chan->tsf_val + offset);
  210. }
  211. if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
  212. goto work;
  213. if (ah->opmode == NL80211_IFTYPE_STATION &&
  214. test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
  215. spin_lock_irqsave(&sc->sc_pm_lock, flags);
  216. sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
  217. spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
  218. } else {
  219. ath9k_set_beacon(sc);
  220. }
  221. work:
  222. ath_restart_work(sc);
  223. ath_txq_schedule_all(sc);
  224. }
  225. sc->gtt_cnt = 0;
  226. ath9k_hw_set_interrupts(ah);
  227. ath9k_hw_enable_interrupts(ah);
  228. ieee80211_wake_queues(sc->hw);
  229. ath9k_p2p_ps_timer(sc);
  230. return true;
  231. }
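/*
 * Core reset path: cancel pending work, quiesce interrupts and DMA, reset
 * the chip to the requested channel (falling back to the current one) and
 * complete the reset. Fast channel change is skipped for on-channel
 * contexts, in chanctx mode, or when DMA could not be stopped cleanly.
 */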
  232. static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
  233. {
  234. struct ath_hw *ah = sc->sc_ah;
  235. struct ath_common *common = ath9k_hw_common(ah);
  236. struct ath9k_hw_cal_data *caldata = NULL;
  237. bool fastcc = true;
  238. int r;
  239. __ath_cancel_work(sc);
  240. disable_irq(sc->irq);
  241. tasklet_disable(&sc->intr_tq);
  242. tasklet_disable(&sc->bcon_tasklet);
  243. spin_lock_bh(&sc->sc_pcu_lock);
  244. if (!sc->cur_chan->offchannel) {
  245. fastcc = false;
  246. caldata = &sc->cur_chan->caldata;
  247. }
  248. if (!hchan) {
  249. fastcc = false;
  250. hchan = ah->curchan;
  251. }
  252. if (!ath_prepare_reset(sc))
  253. fastcc = false;
  254. if (ath9k_is_chanctx_enabled())
  255. fastcc = false;
  256. spin_lock_bh(&sc->chan_lock);
  257. sc->cur_chandef = sc->cur_chan->chandef;
  258. spin_unlock_bh(&sc->chan_lock);
  259. ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
  260. hchan->channel, IS_CHAN_HT40(hchan), fastcc);
  261. r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
  262. if (r) {
  263. ath_err(common,
  264. "Unable to reset channel, reset status %d\n", r);
  265. ath9k_hw_enable_interrupts(ah);
  266. ath9k_queue_reset(sc, RESET_TYPE_BB_HANG);
  267. goto out;
  268. }
  269. if (ath9k_hw_mci_is_enabled(sc->sc_ah) &&
  270. sc->cur_chan->offchannel)
  271. ath9k_mci_set_txpower(sc, true, false);
  272. if (!ath_complete_reset(sc, true))
  273. r = -EIO;
  274. out:
  275. enable_irq(sc->irq);
  276. spin_unlock_bh(&sc->sc_pcu_lock);
  277. tasklet_enable(&sc->bcon_tasklet);
  278. tasklet_enable(&sc->intr_tq);
  279. return r;
  280. }
  281. static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
  282. struct ieee80211_vif *vif)
  283. {
  284. struct ath_node *an;
  285. an = (struct ath_node *)sta->drv_priv;
  286. an->sc = sc;
  287. an->sta = sta;
  288. an->vif = vif;
  289. memset(&an->key_idx, 0, sizeof(an->key_idx));
  290. ath_tx_node_init(sc, an);
  291. ath_dynack_node_init(sc->sc_ah, an);
  292. }
  293. static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
  294. {
  295. struct ath_node *an = (struct ath_node *)sta->drv_priv;
  296. ath_tx_node_cleanup(sc, an);
  297. ath_dynack_node_deinit(sc->sc_ah, an);
  298. }
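/*
 * Bottom-half interrupt handler: queue a reset for fatal, baseband-watchdog
 * and GTT conditions, process RX and TX completions, forward generic-timer
 * and BTCOEX events, and re-enable hardware interrupts when done.
 */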
  299. void ath9k_tasklet(unsigned long data)
  300. {
  301. struct ath_softc *sc = (struct ath_softc *)data;
  302. struct ath_hw *ah = sc->sc_ah;
  303. struct ath_common *common = ath9k_hw_common(ah);
  304. enum ath_reset_type type;
  305. unsigned long flags;
  306. u32 status = sc->intrstatus;
  307. u32 rxmask;
  308. ath9k_ps_wakeup(sc);
  309. spin_lock(&sc->sc_pcu_lock);
  310. if (status & ATH9K_INT_FATAL) {
  311. type = RESET_TYPE_FATAL_INT;
  312. ath9k_queue_reset(sc, type);
  313. /*
  314. * Increment the ref. counter here so that
  315. * interrupts are enabled in the reset routine.
  316. */
  317. atomic_inc(&ah->intr_ref_cnt);
  318. ath_dbg(common, RESET, "FATAL: Skipping interrupts\n");
  319. goto out;
  320. }
  321. if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
  322. (status & ATH9K_INT_BB_WATCHDOG)) {
  323. spin_lock(&common->cc_lock);
  324. ath_hw_cycle_counters_update(common);
  325. ar9003_hw_bb_watchdog_dbg_info(ah);
  326. spin_unlock(&common->cc_lock);
  327. if (ar9003_hw_bb_watchdog_check(ah)) {
  328. type = RESET_TYPE_BB_WATCHDOG;
  329. ath9k_queue_reset(sc, type);
  330. /*
  331. * Increment the ref. counter here so that
  332. * interrupts are enabled in the reset routine.
  333. */
  334. atomic_inc(&ah->intr_ref_cnt);
  335. ath_dbg(common, RESET,
  336. "BB_WATCHDOG: Skipping interrupts\n");
  337. goto out;
  338. }
  339. }
  340. if (status & ATH9K_INT_GTT) {
  341. sc->gtt_cnt++;
  342. if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) {
  343. type = RESET_TYPE_TX_GTT;
  344. ath9k_queue_reset(sc, type);
  345. atomic_inc(&ah->intr_ref_cnt);
  346. ath_dbg(common, RESET,
  347. "GTT: Skipping interrupts\n");
  348. goto out;
  349. }
  350. }
  351. spin_lock_irqsave(&sc->sc_pm_lock, flags);
  352. if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
  353. /*
  354. * TSF sync does not look correct; remain awake to sync with
  355. * the next Beacon.
  356. */
  357. ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n");
  358. sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
  359. }
  360. spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
  361. if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
  362. rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
  363. ATH9K_INT_RXORN);
  364. else
  365. rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
  366. if (status & rxmask) {
  367. /* Check for high priority Rx first */
  368. if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
  369. (status & ATH9K_INT_RXHP))
  370. ath_rx_tasklet(sc, 0, true);
  371. ath_rx_tasklet(sc, 0, false);
  372. }
  373. if (status & ATH9K_INT_TX) {
  374. if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
  375. /*
  376. * For EDMA chips, TX completion is enabled for the
  377. * beacon queue, so if a beacon has been transmitted
  378. * successfully after a GTT interrupt, the GTT counter
  379. * gets reset to zero here.
  380. */
  381. sc->gtt_cnt = 0;
  382. ath_tx_edma_tasklet(sc);
  383. } else {
  384. ath_tx_tasklet(sc);
  385. }
  386. wake_up(&sc->tx_wait);
  387. }
  388. if (status & ATH9K_INT_GENTIMER)
  389. ath_gen_timer_isr(sc->sc_ah);
  390. ath9k_btcoex_handle_interrupt(sc, status);
  391. /* re-enable hardware interrupt */
  392. ath9k_hw_enable_interrupts(ah);
  393. out:
  394. spin_unlock(&sc->sc_pcu_lock);
  395. ath9k_ps_restore(sc);
  396. }
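/*
 * Hardware interrupt handler: read and mask the ISR, cache the status for
 * the tasklet, do the minimal hard-IRQ work (SWBA, TXURN, RXEOL, TIM timer)
 * and, for everything else, disable interrupts and schedule the tasklet.
 */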
  397. irqreturn_t ath_isr(int irq, void *dev)
  398. {
  399. #define SCHED_INTR ( \
  400. ATH9K_INT_FATAL | \
  401. ATH9K_INT_BB_WATCHDOG | \
  402. ATH9K_INT_RXORN | \
  403. ATH9K_INT_RXEOL | \
  404. ATH9K_INT_RX | \
  405. ATH9K_INT_RXLP | \
  406. ATH9K_INT_RXHP | \
  407. ATH9K_INT_TX | \
  408. ATH9K_INT_BMISS | \
  409. ATH9K_INT_CST | \
  410. ATH9K_INT_GTT | \
  411. ATH9K_INT_TSFOOR | \
  412. ATH9K_INT_GENTIMER | \
  413. ATH9K_INT_MCI)
  414. struct ath_softc *sc = dev;
  415. struct ath_hw *ah = sc->sc_ah;
  416. struct ath_common *common = ath9k_hw_common(ah);
  417. enum ath9k_int status;
  418. u32 sync_cause = 0;
  419. bool sched = false;
  420. /*
  421. * The hardware is not ready/present, don't
  422. * touch anything. Note this can happen early
  423. * on if the IRQ is shared.
  424. */
  425. if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
  426. return IRQ_NONE;
  427. /* shared irq, not for us */
  428. if (!ath9k_hw_intrpend(ah))
  429. return IRQ_NONE;
  430. /*
  431. * Figure out the reason(s) for the interrupt. Note
  432. * that the hal returns a pseudo-ISR that may include
  433. * bits we haven't explicitly enabled so we mask the
  434. * value to ensure we only process bits we requested.
  435. */
  436. ath9k_hw_getisr(ah, &status, &sync_cause); /* NB: clears ISR too */
  437. ath9k_debug_sync_cause(sc, sync_cause);
  438. status &= ah->imask; /* discard unasked-for bits */
  439. if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
  440. return IRQ_HANDLED;
  441. /*
  442. * If there are no status bits set, then this interrupt was not
  443. * for me (should have been caught above).
  444. */
  445. if (!status)
  446. return IRQ_NONE;
  447. /* Cache the status */
  448. sc->intrstatus = status;
  449. if (status & SCHED_INTR)
  450. sched = true;
  451. /*
  452. * If a FATAL interrupt is received, we have to reset the chip
  453. * immediately.
  454. */
  455. if (status & ATH9K_INT_FATAL)
  456. goto chip_reset;
  457. if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
  458. (status & ATH9K_INT_BB_WATCHDOG))
  459. goto chip_reset;
  460. if (status & ATH9K_INT_SWBA)
  461. tasklet_schedule(&sc->bcon_tasklet);
  462. if (status & ATH9K_INT_TXURN)
  463. ath9k_hw_updatetxtriglevel(ah, true);
  464. if (status & ATH9K_INT_RXEOL) {
  465. ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
  466. ath9k_hw_set_interrupts(ah);
  467. }
  468. if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
  469. if (status & ATH9K_INT_TIM_TIMER) {
  470. if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle))
  471. goto chip_reset;
  472. /* Clear RxAbort bit so that we can
  473. * receive frames */
  474. ath9k_setpower(sc, ATH9K_PM_AWAKE);
  475. spin_lock(&sc->sc_pm_lock);
  476. ath9k_hw_setrxabort(sc->sc_ah, 0);
  477. sc->ps_flags |= PS_WAIT_FOR_BEACON;
  478. spin_unlock(&sc->sc_pm_lock);
  479. }
  480. chip_reset:
  481. ath_debug_stat_interrupt(sc, status);
  482. if (sched) {
  483. /* turn off every interrupt */
  484. ath9k_hw_disable_interrupts(ah);
  485. tasklet_schedule(&sc->intr_tq);
  486. }
  487. return IRQ_HANDLED;
  488. #undef SCHED_INTR
  489. }
  490. /*
  491. * This function is called when a HW reset cannot be deferred
  492. * and has to be immediate.
  493. */
  494. int ath_reset(struct ath_softc *sc, struct ath9k_channel *hchan)
  495. {
  496. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  497. int r;
  498. ath9k_hw_kill_interrupts(sc->sc_ah);
  499. set_bit(ATH_OP_HW_RESET, &common->op_flags);
  500. ath9k_ps_wakeup(sc);
  501. r = ath_reset_internal(sc, hchan);
  502. ath9k_ps_restore(sc);
  503. return r;
  504. }
  505. /*
  506. * When a HW reset can be deferred, it is added to the
  507. * hw_reset_work workqueue, but we set ATH_OP_HW_RESET before
  508. * queueing.
  509. */
  510. void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type)
  511. {
  512. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  513. #ifdef CONFIG_ATH9K_DEBUGFS
  514. RESET_STAT_INC(sc, type);
  515. #endif
  516. ath9k_hw_kill_interrupts(sc->sc_ah);
  517. set_bit(ATH_OP_HW_RESET, &common->op_flags);
  518. ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
  519. }
  520. void ath_reset_work(struct work_struct *work)
  521. {
  522. struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);
  523. ath9k_ps_wakeup(sc);
  524. ath_reset_internal(sc, NULL);
  525. ath9k_ps_restore(sc);
  526. }
  527. /**********************/
  528. /* mac80211 callbacks */
  529. /**********************/
  530. static int ath9k_start(struct ieee80211_hw *hw)
  531. {
  532. struct ath_softc *sc = hw->priv;
  533. struct ath_hw *ah = sc->sc_ah;
  534. struct ath_common *common = ath9k_hw_common(ah);
  535. struct ieee80211_channel *curchan = sc->cur_chan->chandef.chan;
  536. struct ath_chanctx *ctx = sc->cur_chan;
  537. struct ath9k_channel *init_channel;
  538. int r;
  539. ath_dbg(common, CONFIG,
  540. "Starting driver with initial channel: %d MHz\n",
  541. curchan->center_freq);
  542. ath9k_ps_wakeup(sc);
  543. mutex_lock(&sc->mutex);
  544. init_channel = ath9k_cmn_get_channel(hw, ah, &ctx->chandef);
  545. sc->cur_chandef = hw->conf.chandef;
  546. /* Reset SERDES registers */
  547. ath9k_hw_configpcipowersave(ah, false);
  548. /*
  549. * The basic interface to setting the hardware in a good
  550. * state is ``reset''. On return the hardware is known to
  551. * be powered up and with interrupts disabled. This must
  552. * be followed by initialization of the appropriate bits
  553. * and then setup of the interrupt mask.
  554. */
  555. spin_lock_bh(&sc->sc_pcu_lock);
  556. atomic_set(&ah->intr_ref_cnt, -1);
  557. r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
  558. if (r) {
  559. ath_err(common,
  560. "Unable to reset hardware; reset status %d (freq %u MHz)\n",
  561. r, curchan->center_freq);
  562. ah->reset_power_on = false;
  563. }
  564. /* Setup our intr mask. */
  565. ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
  566. ATH9K_INT_RXORN | ATH9K_INT_FATAL |
  567. ATH9K_INT_GLOBAL;
  568. if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
  569. ah->imask |= ATH9K_INT_RXHP |
  570. ATH9K_INT_RXLP;
  571. else
  572. ah->imask |= ATH9K_INT_RX;
  573. if (ah->config.hw_hang_checks & HW_BB_WATCHDOG)
  574. ah->imask |= ATH9K_INT_BB_WATCHDOG;
  575. /*
  576. * Enable GTT interrupts only for AR9003/AR9004 chips
  577. * for now.
  578. */
  579. if (AR_SREV_9300_20_OR_LATER(ah))
  580. ah->imask |= ATH9K_INT_GTT;
  581. if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
  582. ah->imask |= ATH9K_INT_CST;
  583. ath_mci_enable(sc);
  584. clear_bit(ATH_OP_INVALID, &common->op_flags);
  585. sc->sc_ah->is_monitoring = false;
  586. if (!ath_complete_reset(sc, false))
  587. ah->reset_power_on = false;
  588. if (ah->led_pin >= 0) {
  589. ath9k_hw_cfg_output(ah, ah->led_pin,
  590. AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
  591. ath9k_hw_set_gpio(ah, ah->led_pin,
  592. (ah->config.led_active_high) ? 1 : 0);
  593. }
  594. /*
  595. * Reset key cache to sane defaults (all entries cleared) instead of
  596. * semi-random values after suspend/resume.
  597. */
  598. ath9k_cmn_init_crypto(sc->sc_ah);
  599. ath9k_hw_reset_tsf(ah);
  600. spin_unlock_bh(&sc->sc_pcu_lock);
  601. mutex_unlock(&sc->mutex);
  602. ath9k_ps_restore(sc);
  603. ath9k_rng_start(sc);
  604. return 0;
  605. }
  606. static void ath9k_tx(struct ieee80211_hw *hw,
  607. struct ieee80211_tx_control *control,
  608. struct sk_buff *skb)
  609. {
  610. struct ath_softc *sc = hw->priv;
  611. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  612. struct ath_tx_control txctl;
  613. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  614. unsigned long flags;
  615. if (sc->ps_enabled) {
  616. /*
  617. * mac80211 does not set PM field for normal data frames, so we
  618. * need to update that based on the current PS mode.
  619. */
  620. if (ieee80211_is_data(hdr->frame_control) &&
  621. !ieee80211_is_nullfunc(hdr->frame_control) &&
  622. !ieee80211_has_pm(hdr->frame_control)) {
  623. ath_dbg(common, PS,
  624. "Add PM=1 for a TX frame while in PS mode\n");
  625. hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
  626. }
  627. }
  628. if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP)) {
  629. /*
  630. * We are using PS-Poll and mac80211 can request TX while in
  631. * power save mode. Need to wake up hardware for the TX to be
  632. * completed and if needed, also for RX of buffered frames.
  633. */
  634. ath9k_ps_wakeup(sc);
  635. spin_lock_irqsave(&sc->sc_pm_lock, flags);
  636. if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
  637. ath9k_hw_setrxabort(sc->sc_ah, 0);
  638. if (ieee80211_is_pspoll(hdr->frame_control)) {
  639. ath_dbg(common, PS,
  640. "Sending PS-Poll to pick a buffered frame\n");
  641. sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
  642. } else {
  643. ath_dbg(common, PS, "Wake up to complete TX\n");
  644. sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
  645. }
  646. /*
  647. * The actual restore operation will happen only after
  648. * the ps_flags bit is cleared. We are just dropping
  649. * the ps_usecount here.
  650. */
  651. spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
  652. ath9k_ps_restore(sc);
  653. }
  654. /*
  655. * Cannot TX while the hardware is in full sleep; it first needs a full
  656. * chip reset to recover from that.
  657. */
  658. if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP)) {
  659. ath_err(common, "TX while HW is in FULL_SLEEP mode\n");
  660. goto exit;
  661. }
  662. memset(&txctl, 0, sizeof(struct ath_tx_control));
  663. txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
  664. txctl.sta = control->sta;
  665. ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
  666. if (ath_tx_start(hw, skb, &txctl) != 0) {
  667. ath_dbg(common, XMIT, "TX failed\n");
  668. TX_STAT_INC(txctl.txq->axq_qnum, txfailed);
  669. goto exit;
  670. }
  671. return;
  672. exit:
  673. ieee80211_free_txskb(hw, skb);
  674. }
  675. static void ath9k_stop(struct ieee80211_hw *hw)
  676. {
  677. struct ath_softc *sc = hw->priv;
  678. struct ath_hw *ah = sc->sc_ah;
  679. struct ath_common *common = ath9k_hw_common(ah);
  680. bool prev_idle;
  681. ath9k_deinit_channel_context(sc);
  682. ath9k_rng_stop(sc);
  683. mutex_lock(&sc->mutex);
  684. ath_cancel_work(sc);
  685. if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
  686. ath_dbg(common, ANY, "Device not present\n");
  687. mutex_unlock(&sc->mutex);
  688. return;
  689. }
  690. /* Ensure HW is awake when we try to shut it down. */
  691. ath9k_ps_wakeup(sc);
  692. spin_lock_bh(&sc->sc_pcu_lock);
  693. /* prevent tasklets from enabling interrupts once we disable them */
  694. ah->imask &= ~ATH9K_INT_GLOBAL;
  695. /* make sure h/w will not generate any interrupt
  696. * before setting the invalid flag. */
  697. ath9k_hw_disable_interrupts(ah);
  698. spin_unlock_bh(&sc->sc_pcu_lock);
  699. /* we can now sync irq and kill any running tasklets, since we already
  700. * disabled interrupts and are not holding a spin lock */
  701. synchronize_irq(sc->irq);
  702. tasklet_kill(&sc->intr_tq);
  703. tasklet_kill(&sc->bcon_tasklet);
  704. prev_idle = sc->ps_idle;
  705. sc->ps_idle = true;
  706. spin_lock_bh(&sc->sc_pcu_lock);
  707. if (ah->led_pin >= 0) {
  708. ath9k_hw_set_gpio(ah, ah->led_pin,
  709. (ah->config.led_active_high) ? 0 : 1);
  710. ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
  711. }
  712. ath_prepare_reset(sc);
  713. if (sc->rx.frag) {
  714. dev_kfree_skb_any(sc->rx.frag);
  715. sc->rx.frag = NULL;
  716. }
  717. if (!ah->curchan)
  718. ah->curchan = ath9k_cmn_get_channel(hw, ah,
  719. &sc->cur_chan->chandef);
  720. ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
  721. set_bit(ATH_OP_INVALID, &common->op_flags);
  722. ath9k_hw_phy_disable(ah);
  723. ath9k_hw_configpcipowersave(ah, true);
  724. spin_unlock_bh(&sc->sc_pcu_lock);
  725. ath9k_ps_restore(sc);
  726. sc->ps_idle = prev_idle;
  727. mutex_unlock(&sc->mutex);
  728. ath_dbg(common, CONFIG, "Driver halt\n");
  729. }
  730. static bool ath9k_uses_beacons(int type)
  731. {
  732. switch (type) {
  733. case NL80211_IFTYPE_AP:
  734. case NL80211_IFTYPE_ADHOC:
  735. case NL80211_IFTYPE_MESH_POINT:
  736. return true;
  737. default:
  738. return false;
  739. }
  740. }
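/*
 * Per-VIF iterator: count interfaces by type, remember the primary station
 * VIF, clear BSSID-mask bits that differ between a VIF address and the
 * hardware MAC address, and fall back to the 20 us slot time when a VIF
 * does not use short slots.
 */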
  741. static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data,
  742. u8 *mac, struct ieee80211_vif *vif)
  743. {
  744. struct ath_vif *avp = (struct ath_vif *)vif->drv_priv;
  745. int i;
  746. if (iter_data->has_hw_macaddr) {
  747. for (i = 0; i < ETH_ALEN; i++)
  748. iter_data->mask[i] &=
  749. ~(iter_data->hw_macaddr[i] ^ mac[i]);
  750. } else {
  751. memcpy(iter_data->hw_macaddr, mac, ETH_ALEN);
  752. iter_data->has_hw_macaddr = true;
  753. }
  754. if (!vif->bss_conf.use_short_slot)
  755. iter_data->slottime = ATH9K_SLOT_TIME_20;
  756. switch (vif->type) {
  757. case NL80211_IFTYPE_AP:
  758. iter_data->naps++;
  759. break;
  760. case NL80211_IFTYPE_STATION:
  761. iter_data->nstations++;
  762. if (avp->assoc && !iter_data->primary_sta)
  763. iter_data->primary_sta = vif;
  764. break;
  765. case NL80211_IFTYPE_OCB:
  766. iter_data->nocbs++;
  767. break;
  768. case NL80211_IFTYPE_ADHOC:
  769. iter_data->nadhocs++;
  770. if (vif->bss_conf.enable_beacon)
  771. iter_data->beacons = true;
  772. break;
  773. case NL80211_IFTYPE_MESH_POINT:
  774. iter_data->nmeshes++;
  775. if (vif->bss_conf.enable_beacon)
  776. iter_data->beacons = true;
  777. break;
  778. case NL80211_IFTYPE_WDS:
  779. iter_data->nwds++;
  780. break;
  781. default:
  782. break;
  783. }
  784. }
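/*
 * In chanctx mode, when a context has exactly one assigned VIF, use that
 * VIF's BSSID as the current BSSID and widen the address mask so that the
 * p2p device address (perm_addr) is also accepted.
 */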
  785. static void ath9k_update_bssid_mask(struct ath_softc *sc,
  786. struct ath_chanctx *ctx,
  787. struct ath9k_vif_iter_data *iter_data)
  788. {
  789. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  790. struct ath_vif *avp;
  791. int i;
  792. if (!ath9k_is_chanctx_enabled())
  793. return;
  794. list_for_each_entry(avp, &ctx->vifs, list) {
  795. if (ctx->nvifs_assigned != 1)
  796. continue;
  797. if (!iter_data->has_hw_macaddr)
  798. continue;
  799. ether_addr_copy(common->curbssid, avp->bssid);
  800. /* perm_addr will be used as the p2p device address. */
  801. for (i = 0; i < ETH_ALEN; i++)
  802. iter_data->mask[i] &=
  803. ~(iter_data->hw_macaddr[i] ^
  804. sc->hw->wiphy->perm_addr[i]);
  805. }
  806. }
  807. /* Called with sc->mutex held. */
  808. void ath9k_calculate_iter_data(struct ath_softc *sc,
  809. struct ath_chanctx *ctx,
  810. struct ath9k_vif_iter_data *iter_data)
  811. {
  812. struct ath_vif *avp;
  813. /*
  814. * The hardware will use primary station addr together with the
  815. * BSSID mask when matching addresses.
  816. */
  817. memset(iter_data, 0, sizeof(*iter_data));
  818. eth_broadcast_addr(iter_data->mask);
  819. iter_data->slottime = ATH9K_SLOT_TIME_9;
  820. list_for_each_entry(avp, &ctx->vifs, list)
  821. ath9k_vif_iter(iter_data, avp->vif->addr, avp->vif);
  822. ath9k_update_bssid_mask(sc, ctx, iter_data);
  823. }
  824. static void ath9k_set_assoc_state(struct ath_softc *sc,
  825. struct ieee80211_vif *vif, bool changed)
  826. {
  827. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  828. struct ath_vif *avp = (struct ath_vif *)vif->drv_priv;
  829. unsigned long flags;
  830. set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
  831. ether_addr_copy(common->curbssid, avp->bssid);
  832. common->curaid = avp->aid;
  833. ath9k_hw_write_associd(sc->sc_ah);
  834. if (changed) {
  835. common->last_rssi = ATH_RSSI_DUMMY_MARKER;
  836. sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
  837. spin_lock_irqsave(&sc->sc_pm_lock, flags);
  838. sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
  839. spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
  840. }
  841. if (ath9k_hw_mci_is_enabled(sc->sc_ah))
  842. ath9k_mci_update_wlan_channels(sc, false);
  843. ath_dbg(common, CONFIG,
  844. "Primary Station interface: %pM, BSSID: %pM\n",
  845. vif->addr, common->curbssid);
  846. }
  847. #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
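/*
 * Program the MAC for offchannel operation (scan or remain-on-channel):
 * clear the BSSID, open up the BSSID mask, take the MAC address and opmode
 * from the offchannel VIF, and mask the beacon and TSFOOR interrupts.
 */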
  848. static void ath9k_set_offchannel_state(struct ath_softc *sc)
  849. {
  850. struct ath_hw *ah = sc->sc_ah;
  851. struct ath_common *common = ath9k_hw_common(ah);
  852. struct ieee80211_vif *vif = NULL;
  853. ath9k_ps_wakeup(sc);
  854. if (sc->offchannel.state < ATH_OFFCHANNEL_ROC_START)
  855. vif = sc->offchannel.scan_vif;
  856. else
  857. vif = sc->offchannel.roc_vif;
  858. if (WARN_ON(!vif))
  859. goto exit;
  860. eth_zero_addr(common->curbssid);
  861. eth_broadcast_addr(common->bssidmask);
  862. memcpy(common->macaddr, vif->addr, ETH_ALEN);
  863. common->curaid = 0;
  864. ah->opmode = vif->type;
  865. ah->imask &= ~ATH9K_INT_SWBA;
  866. ah->imask &= ~ATH9K_INT_TSFOOR;
  867. ah->slottime = ATH9K_SLOT_TIME_9;
  868. ath_hw_setbssidmask(common);
  869. ath9k_hw_setopmode(ah);
  870. ath9k_hw_write_associd(sc->sc_ah);
  871. ath9k_hw_set_interrupts(ah);
  872. ath9k_hw_init_global_settings(ah);
  873. exit:
  874. ath9k_ps_restore(sc);
  875. }
  876. #endif
  877. /* Called with sc->mutex held. */
  878. void ath9k_calculate_summary_state(struct ath_softc *sc,
  879. struct ath_chanctx *ctx)
  880. {
  881. struct ath_hw *ah = sc->sc_ah;
  882. struct ath_common *common = ath9k_hw_common(ah);
  883. struct ath9k_vif_iter_data iter_data;
  884. struct ath_beacon_config *cur_conf;
  885. ath_chanctx_check_active(sc, ctx);
  886. if (ctx != sc->cur_chan)
  887. return;
  888. #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
  889. if (ctx == &sc->offchannel.chan)
  890. return ath9k_set_offchannel_state(sc);
  891. #endif
  892. ath9k_ps_wakeup(sc);
  893. ath9k_calculate_iter_data(sc, ctx, &iter_data);
  894. if (iter_data.has_hw_macaddr)
  895. memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
  896. memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
  897. ath_hw_setbssidmask(common);
  898. if (iter_data.naps > 0) {
  899. cur_conf = &ctx->beacon;
  900. ath9k_hw_set_tsfadjust(ah, true);
  901. ah->opmode = NL80211_IFTYPE_AP;
  902. if (cur_conf->enable_beacon)
  903. iter_data.beacons = true;
  904. } else {
  905. ath9k_hw_set_tsfadjust(ah, false);
  906. if (iter_data.nmeshes)
  907. ah->opmode = NL80211_IFTYPE_MESH_POINT;
  908. else if (iter_data.nocbs)
  909. ah->opmode = NL80211_IFTYPE_OCB;
  910. else if (iter_data.nwds)
  911. ah->opmode = NL80211_IFTYPE_AP;
  912. else if (iter_data.nadhocs)
  913. ah->opmode = NL80211_IFTYPE_ADHOC;
  914. else
  915. ah->opmode = NL80211_IFTYPE_STATION;
  916. }
  917. ath9k_hw_setopmode(ah);
  918. ctx->switch_after_beacon = false;
  919. if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0)
  920. ah->imask |= ATH9K_INT_TSFOOR;
  921. else {
  922. ah->imask &= ~ATH9K_INT_TSFOOR;
  923. if (iter_data.naps == 1 && iter_data.beacons)
  924. ctx->switch_after_beacon = true;
  925. }
  926. ah->imask &= ~ATH9K_INT_SWBA;
  927. if (ah->opmode == NL80211_IFTYPE_STATION) {
  928. bool changed = (iter_data.primary_sta != ctx->primary_sta);
  929. if (iter_data.primary_sta) {
  930. iter_data.beacons = true;
  931. ath9k_set_assoc_state(sc, iter_data.primary_sta,
  932. changed);
  933. ctx->primary_sta = iter_data.primary_sta;
  934. } else {
  935. ctx->primary_sta = NULL;
  936. eth_zero_addr(common->curbssid);
  937. common->curaid = 0;
  938. ath9k_hw_write_associd(sc->sc_ah);
  939. if (ath9k_hw_mci_is_enabled(sc->sc_ah))
  940. ath9k_mci_update_wlan_channels(sc, true);
  941. }
  942. } else if (iter_data.beacons) {
  943. ah->imask |= ATH9K_INT_SWBA;
  944. }
  945. ath9k_hw_set_interrupts(ah);
  946. if (iter_data.beacons)
  947. set_bit(ATH_OP_BEACONS, &common->op_flags);
  948. else
  949. clear_bit(ATH_OP_BEACONS, &common->op_flags);
  950. if (ah->slottime != iter_data.slottime) {
  951. ah->slottime = iter_data.slottime;
  952. ath9k_hw_init_global_settings(ah);
  953. }
  954. if (iter_data.primary_sta)
  955. set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
  956. else
  957. clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
  958. ath_dbg(common, CONFIG,
  959. "macaddr: %pM, bssid: %pM, bssidmask: %pM\n",
  960. common->macaddr, common->curbssid, common->bssidmask);
  961. ath9k_ps_restore(sc);
  962. }
  963. static void ath9k_tpc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
  964. {
  965. int *power = (int *)data;
  966. if (*power < vif->bss_conf.txpower)
  967. *power = vif->bss_conf.txpower;
  968. }
  969. /* Called with sc->mutex held. */
  970. void ath9k_set_txpower(struct ath_softc *sc, struct ieee80211_vif *vif)
  971. {
  972. int power;
  973. struct ath_hw *ah = sc->sc_ah;
  974. struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
  975. ath9k_ps_wakeup(sc);
  976. if (ah->tpc_enabled) {
  977. power = (vif) ? vif->bss_conf.txpower : -1;
  978. ieee80211_iterate_active_interfaces_atomic(
  979. sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
  980. ath9k_tpc_vif_iter, &power);
  981. if (power == -1)
  982. power = sc->hw->conf.power_level;
  983. } else {
  984. power = sc->hw->conf.power_level;
  985. }
  986. sc->cur_chan->txpower = 2 * power;
  987. ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
  988. sc->cur_chan->cur_txpower = reg->max_power_level;
  989. ath9k_ps_restore(sc);
  990. }
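/*
 * In chanctx mode, map the mac80211 access categories to hardware queues
 * and select the CAB queue for beaconing (AP/mesh) interfaces.
 */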
  991. static void ath9k_assign_hw_queues(struct ieee80211_hw *hw,
  992. struct ieee80211_vif *vif)
  993. {
  994. int i;
  995. if (!ath9k_is_chanctx_enabled())
  996. return;
  997. for (i = 0; i < IEEE80211_NUM_ACS; i++)
  998. vif->hw_queue[i] = i;
  999. if (vif->type == NL80211_IFTYPE_AP ||
  1000. vif->type == NL80211_IFTYPE_MESH_POINT)
  1001. vif->cab_queue = hw->queues - 2;
  1002. else
  1003. vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
  1004. }
  1005. static int ath9k_add_interface(struct ieee80211_hw *hw,
  1006. struct ieee80211_vif *vif)
  1007. {
  1008. struct ath_softc *sc = hw->priv;
  1009. struct ath_hw *ah = sc->sc_ah;
  1010. struct ath_common *common = ath9k_hw_common(ah);
  1011. struct ath_vif *avp = (void *)vif->drv_priv;
  1012. struct ath_node *an = &avp->mcast_node;
  1013. mutex_lock(&sc->mutex);
  1014. if (config_enabled(CONFIG_ATH9K_TX99)) {
  1015. if (sc->cur_chan->nvifs >= 1) {
  1016. mutex_unlock(&sc->mutex);
  1017. return -EOPNOTSUPP;
  1018. }
  1019. sc->tx99_vif = vif;
  1020. }
  1021. ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
  1022. sc->cur_chan->nvifs++;
  1023. if (vif->type == NL80211_IFTYPE_STATION && ath9k_is_chanctx_enabled())
  1024. vif->driver_flags |= IEEE80211_VIF_GET_NOA_UPDATE;
  1025. if (ath9k_uses_beacons(vif->type))
  1026. ath9k_beacon_assign_slot(sc, vif);
  1027. avp->vif = vif;
  1028. if (!ath9k_is_chanctx_enabled()) {
  1029. avp->chanctx = sc->cur_chan;
  1030. list_add_tail(&avp->list, &avp->chanctx->vifs);
  1031. }
  1032. ath9k_calculate_summary_state(sc, avp->chanctx);
  1033. ath9k_assign_hw_queues(hw, vif);
  1034. ath9k_set_txpower(sc, vif);
  1035. an->sc = sc;
  1036. an->sta = NULL;
  1037. an->vif = vif;
  1038. an->no_ps_filter = true;
  1039. ath_tx_node_init(sc, an);
  1040. mutex_unlock(&sc->mutex);
  1041. return 0;
  1042. }
  1043. static int ath9k_change_interface(struct ieee80211_hw *hw,
  1044. struct ieee80211_vif *vif,
  1045. enum nl80211_iftype new_type,
  1046. bool p2p)
  1047. {
  1048. struct ath_softc *sc = hw->priv;
  1049. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  1050. struct ath_vif *avp = (void *)vif->drv_priv;
  1051. mutex_lock(&sc->mutex);
  1052. if (config_enabled(CONFIG_ATH9K_TX99)) {
  1053. mutex_unlock(&sc->mutex);
  1054. return -EOPNOTSUPP;
  1055. }
  1056. ath_dbg(common, CONFIG, "Change Interface\n");
  1057. if (ath9k_uses_beacons(vif->type))
  1058. ath9k_beacon_remove_slot(sc, vif);
  1059. vif->type = new_type;
  1060. vif->p2p = p2p;
  1061. if (ath9k_uses_beacons(vif->type))
  1062. ath9k_beacon_assign_slot(sc, vif);
  1063. ath9k_assign_hw_queues(hw, vif);
  1064. ath9k_calculate_summary_state(sc, avp->chanctx);
  1065. ath9k_set_txpower(sc, vif);
  1066. mutex_unlock(&sc->mutex);
  1067. return 0;
  1068. }
  1069. static void ath9k_remove_interface(struct ieee80211_hw *hw,
  1070. struct ieee80211_vif *vif)
  1071. {
  1072. struct ath_softc *sc = hw->priv;
  1073. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  1074. struct ath_vif *avp = (void *)vif->drv_priv;
  1075. ath_dbg(common, CONFIG, "Detach Interface\n");
  1076. mutex_lock(&sc->mutex);
  1077. ath9k_p2p_remove_vif(sc, vif);
  1078. sc->cur_chan->nvifs--;
  1079. sc->tx99_vif = NULL;
  1080. if (!ath9k_is_chanctx_enabled())
  1081. list_del(&avp->list);
  1082. if (ath9k_uses_beacons(vif->type))
  1083. ath9k_beacon_remove_slot(sc, vif);
  1084. ath_tx_node_cleanup(sc, &avp->mcast_node);
  1085. ath9k_calculate_summary_state(sc, avp->chanctx);
  1086. ath9k_set_txpower(sc, NULL);
  1087. mutex_unlock(&sc->mutex);
  1088. }
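/*
 * Enable driver power save: on chips without hardware auto-sleep, arm the
 * TIM timer interrupt and abort RX so the chip can doze between beacons.
 */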
  1089. static void ath9k_enable_ps(struct ath_softc *sc)
  1090. {
  1091. struct ath_hw *ah = sc->sc_ah;
  1092. struct ath_common *common = ath9k_hw_common(ah);
  1093. if (config_enabled(CONFIG_ATH9K_TX99))
  1094. return;
  1095. sc->ps_enabled = true;
  1096. if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
  1097. if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
  1098. ah->imask |= ATH9K_INT_TIM_TIMER;
  1099. ath9k_hw_set_interrupts(ah);
  1100. }
  1101. ath9k_hw_setrxabort(ah, 1);
  1102. }
  1103. ath_dbg(common, PS, "PowerSave enabled\n");
  1104. }
  1105. static void ath9k_disable_ps(struct ath_softc *sc)
  1106. {
  1107. struct ath_hw *ah = sc->sc_ah;
  1108. struct ath_common *common = ath9k_hw_common(ah);
  1109. if (config_enabled(CONFIG_ATH9K_TX99))
  1110. return;
  1111. sc->ps_enabled = false;
  1112. ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
  1113. if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
  1114. ath9k_hw_setrxabort(ah, 0);
  1115. sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
  1116. PS_WAIT_FOR_CAB |
  1117. PS_WAIT_FOR_PSPOLL_DATA |
  1118. PS_WAIT_FOR_TX_ACK);
  1119. if (ah->imask & ATH9K_INT_TIM_TIMER) {
  1120. ah->imask &= ~ATH9K_INT_TIM_TIMER;
  1121. ath9k_hw_set_interrupts(ah);
  1122. }
  1123. }
  1124. ath_dbg(common, PS, "PowerSave disabled\n");
  1125. }
  1126. static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
  1127. {
  1128. struct ath_softc *sc = hw->priv;
  1129. struct ath_hw *ah = sc->sc_ah;
  1130. struct ath_common *common = ath9k_hw_common(ah);
  1131. struct ieee80211_conf *conf = &hw->conf;
  1132. struct ath_chanctx *ctx = sc->cur_chan;
  1133. ath9k_ps_wakeup(sc);
  1134. mutex_lock(&sc->mutex);
  1135. if (changed & IEEE80211_CONF_CHANGE_IDLE) {
  1136. sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
  1137. if (sc->ps_idle) {
  1138. ath_cancel_work(sc);
  1139. ath9k_stop_btcoex(sc);
  1140. } else {
  1141. ath9k_start_btcoex(sc);
  1142. /*
  1143. * The chip needs a reset to properly wake up from
  1144. * full sleep
  1145. */
  1146. ath_chanctx_set_channel(sc, ctx, &ctx->chandef);
  1147. }
  1148. }
  1149. /*
  1150. * We just prepare to enable PS. We have to wait until our AP has
  1151. * ACK'd our null data frame before disabling RX; otherwise we'll ignore
  1152. * those ACKs and end up retransmitting the same null data frames.
  1153. * IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode.
  1154. */
  1155. if (changed & IEEE80211_CONF_CHANGE_PS) {
  1156. unsigned long flags;
  1157. spin_lock_irqsave(&sc->sc_pm_lock, flags);
  1158. if (conf->flags & IEEE80211_CONF_PS)
  1159. ath9k_enable_ps(sc);
  1160. else
  1161. ath9k_disable_ps(sc);
  1162. spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
  1163. }
  1164. if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
  1165. if (conf->flags & IEEE80211_CONF_MONITOR) {
  1166. ath_dbg(common, CONFIG, "Monitor mode is enabled\n");
  1167. sc->sc_ah->is_monitoring = true;
  1168. } else {
  1169. ath_dbg(common, CONFIG, "Monitor mode is disabled\n");
  1170. sc->sc_ah->is_monitoring = false;
  1171. }
  1172. }
  1173. if (!ath9k_is_chanctx_enabled() && (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
  1174. ctx->offchannel = !!(conf->flags & IEEE80211_CONF_OFFCHANNEL);
  1175. ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
  1176. }
  1177. mutex_unlock(&sc->mutex);
  1178. ath9k_ps_restore(sc);
  1179. return 0;
  1180. }
  1181. #define SUPPORTED_FILTERS \
  1182. (FIF_ALLMULTI | \
  1183. FIF_CONTROL | \
  1184. FIF_PSPOLL | \
  1185. FIF_OTHER_BSS | \
  1186. FIF_BCN_PRBRESP_PROMISC | \
  1187. FIF_PROBE_REQ | \
  1188. FIF_FCSFAIL)
  1189. /* FIXME: sc->sc_full_reset ? */
  1190. static void ath9k_configure_filter(struct ieee80211_hw *hw,
  1191. unsigned int changed_flags,
  1192. unsigned int *total_flags,
  1193. u64 multicast)
  1194. {
  1195. struct ath_softc *sc = hw->priv;
  1196. struct ath_chanctx *ctx;
  1197. u32 rfilt;
  1198. changed_flags &= SUPPORTED_FILTERS;
  1199. *total_flags &= SUPPORTED_FILTERS;
  1200. spin_lock_bh(&sc->chan_lock);
  1201. ath_for_each_chanctx(sc, ctx)
  1202. ctx->rxfilter = *total_flags;
  1203. #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
  1204. sc->offchannel.chan.rxfilter = *total_flags;
  1205. #endif
  1206. spin_unlock_bh(&sc->chan_lock);
  1207. ath9k_ps_wakeup(sc);
  1208. rfilt = ath_calcrxfilter(sc);
  1209. ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
  1210. ath9k_ps_restore(sc);
  1211. ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, "Set HW RX filter: 0x%x\n",
  1212. rfilt);
  1213. }
  1214. static int ath9k_sta_add(struct ieee80211_hw *hw,
  1215. struct ieee80211_vif *vif,
  1216. struct ieee80211_sta *sta)
  1217. {
  1218. struct ath_softc *sc = hw->priv;
  1219. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  1220. struct ath_node *an = (struct ath_node *) sta->drv_priv;
  1221. struct ieee80211_key_conf ps_key = { };
  1222. int key;
  1223. ath_node_attach(sc, sta, vif);
  1224. if (vif->type != NL80211_IFTYPE_AP &&
  1225. vif->type != NL80211_IFTYPE_AP_VLAN)
  1226. return 0;
  1227. key = ath_key_config(common, vif, sta, &ps_key);
  1228. if (key > 0) {
  1229. an->ps_key = key;
  1230. an->key_idx[0] = key;
  1231. }
  1232. return 0;
  1233. }
  1234. static void ath9k_del_ps_key(struct ath_softc *sc,
  1235. struct ieee80211_vif *vif,
  1236. struct ieee80211_sta *sta)
  1237. {
  1238. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  1239. struct ath_node *an = (struct ath_node *) sta->drv_priv;
  1240. struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key };
  1241. if (!an->ps_key)
  1242. return;
  1243. ath_key_delete(common, &ps_key);
  1244. an->ps_key = 0;
  1245. an->key_idx[0] = 0;
  1246. }
  1247. static int ath9k_sta_remove(struct ieee80211_hw *hw,
  1248. struct ieee80211_vif *vif,
  1249. struct ieee80211_sta *sta)
  1250. {
  1251. struct ath_softc *sc = hw->priv;
  1252. ath9k_del_ps_key(sc, vif, sta);
  1253. ath_node_detach(sc, sta);
  1254. return 0;
  1255. }
  1256. static int ath9k_sta_state(struct ieee80211_hw *hw,
  1257. struct ieee80211_vif *vif,
  1258. struct ieee80211_sta *sta,
  1259. enum ieee80211_sta_state old_state,
  1260. enum ieee80211_sta_state new_state)
  1261. {
  1262. struct ath_softc *sc = hw->priv;
  1263. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  1264. int ret = 0;
  1265. if (old_state == IEEE80211_STA_AUTH &&
  1266. new_state == IEEE80211_STA_ASSOC) {
  1267. ret = ath9k_sta_add(hw, vif, sta);
  1268. ath_dbg(common, CONFIG,
  1269. "Add station: %pM\n", sta->addr);
  1270. } else if (old_state == IEEE80211_STA_ASSOC &&
  1271. new_state == IEEE80211_STA_AUTH) {
  1272. ret = ath9k_sta_remove(hw, vif, sta);
  1273. ath_dbg(common, CONFIG,
  1274. "Remove station: %pM\n", sta->addr);
  1275. }
  1276. if (ath9k_is_chanctx_enabled()) {
  1277. if (vif->type == NL80211_IFTYPE_STATION) {
  1278. if (old_state == IEEE80211_STA_ASSOC &&
  1279. new_state == IEEE80211_STA_AUTHORIZED)
  1280. ath_chanctx_event(sc, vif,
  1281. ATH_CHANCTX_EVENT_AUTHORIZED);
  1282. }
  1283. }
  1284. return ret;
  1285. }
  1286. static void ath9k_sta_set_tx_filter(struct ath_hw *ah,
  1287. struct ath_node *an,
  1288. bool set)
  1289. {
  1290. int i;
  1291. for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
  1292. if (!an->key_idx[i])
  1293. continue;
  1294. ath9k_hw_set_tx_filter(ah, an->key_idx[i], set);
  1295. }
  1296. }
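/*
 * Sleep/wake notifications from mac80211: pause aggregation and set the
 * per-key TX filters while a station sleeps, and undo both on wakeup.
 */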
  1297. static void ath9k_sta_notify(struct ieee80211_hw *hw,
  1298. struct ieee80211_vif *vif,
  1299. enum sta_notify_cmd cmd,
  1300. struct ieee80211_sta *sta)
  1301. {
  1302. struct ath_softc *sc = hw->priv;
  1303. struct ath_node *an = (struct ath_node *) sta->drv_priv;
  1304. switch (cmd) {
  1305. case STA_NOTIFY_SLEEP:
  1306. an->sleeping = true;
  1307. ath_tx_aggr_sleep(sta, sc, an);
  1308. ath9k_sta_set_tx_filter(sc->sc_ah, an, true);
  1309. break;
  1310. case STA_NOTIFY_AWAKE:
  1311. ath9k_sta_set_tx_filter(sc->sc_ah, an, false);
  1312. an->sleeping = false;
  1313. ath_tx_aggr_wakeup(sc, an);
  1314. break;
  1315. }
  1316. }
  1317. static int ath9k_conf_tx(struct ieee80211_hw *hw,
  1318. struct ieee80211_vif *vif, u16 queue,
  1319. const struct ieee80211_tx_queue_params *params)
  1320. {
  1321. struct ath_softc *sc = hw->priv;
  1322. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  1323. struct ath_txq *txq;
  1324. struct ath9k_tx_queue_info qi;
  1325. int ret = 0;
  1326. if (queue >= IEEE80211_NUM_ACS)
  1327. return 0;
  1328. txq = sc->tx.txq_map[queue];
  1329. ath9k_ps_wakeup(sc);
  1330. mutex_lock(&sc->mutex);
  1331. memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
  1332. qi.tqi_aifs = params->aifs;
  1333. qi.tqi_cwmin = params->cw_min;
  1334. qi.tqi_cwmax = params->cw_max;
  1335. qi.tqi_burstTime = params->txop * 32;
  1336. ath_dbg(common, CONFIG,
  1337. "Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
  1338. queue, txq->axq_qnum, params->aifs, params->cw_min,
  1339. params->cw_max, params->txop);
  1340. ath_update_max_aggr_framelen(sc, queue, qi.tqi_burstTime);
  1341. ret = ath_txq_update(sc, txq->axq_qnum, &qi);
  1342. if (ret)
  1343. ath_err(common, "TXQ Update failed\n");
  1344. mutex_unlock(&sc->mutex);
  1345. ath9k_ps_restore(sc);
  1346. return ret;
  1347. }
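
/*
 * mac80211 set_key callback: programs pairwise and group keys into the
 * hardware key cache via ath_key_config() and records the resulting key
 * cache index in the station's key_idx[] table, so it can be cleared on
 * DISABLE_KEY and used for the powersave TX filter above.
 */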
static int ath9k_set_key(struct ieee80211_hw *hw,
			 enum set_key_cmd cmd,
			 struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta,
			 struct ieee80211_key_conf *key)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_node *an = NULL;
	int ret = 0, i;

	if (ath9k_modparam_nohwcrypt)
		return -ENOSPC;

	if ((vif->type == NL80211_IFTYPE_ADHOC ||
	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		/*
		 * For now, disable hw crypto for the RSN IBSS group keys. This
		 * could be optimized in the future to use a modified key cache
		 * design to support per-STA RX GTK, but until that gets
		 * implemented, using software crypto for group addressed
		 * frames is an acceptable trade-off that allows RSN IBSS to
		 * be used.
		 */
		return -EOPNOTSUPP;
	}

	mutex_lock(&sc->mutex);
	ath9k_ps_wakeup(sc);
	ath_dbg(common, CONFIG, "Set HW Key %d\n", cmd);
	if (sta)
		an = (struct ath_node *)sta->drv_priv;

	switch (cmd) {
	case SET_KEY:
		if (sta)
			ath9k_del_ps_key(sc, vif, sta);

		key->hw_key_idx = 0;
		ret = ath_key_config(common, vif, sta, key);
		if (ret >= 0) {
			key->hw_key_idx = ret;
			/* push IV and Michael MIC generation to stack */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
				key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
			if (sc->sc_ah->sw_mgmt_crypto_tx &&
			    key->cipher == WLAN_CIPHER_SUITE_CCMP)
				key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
			ret = 0;
		}
		if (an && key->hw_key_idx) {
			for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
				if (an->key_idx[i])
					continue;
				an->key_idx[i] = key->hw_key_idx;
				break;
			}
			WARN_ON(i == ARRAY_SIZE(an->key_idx));
		}
		break;
	case DISABLE_KEY:
		ath_key_delete(common, key);
		if (an) {
			for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
				if (an->key_idx[i] != key->hw_key_idx)
					continue;
				an->key_idx[i] = 0;
				break;
			}
		}
		key->hw_key_idx = 0;
		break;
	default:
		ret = -EINVAL;
	}

	ath9k_ps_restore(sc);
	mutex_unlock(&sc->mutex);

	return ret;
}
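
/*
 * mac80211 bss_info_changed callback: picks up association state, BSSID,
 * beacon configuration, ERP slot time, P2P powersave and TX power changes
 * and pushes them to the hardware.
 */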
static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_bss_conf *bss_conf,
				   u32 changed)
{
#define CHECK_ANI				\
	(BSS_CHANGED_ASSOC |			\
	 BSS_CHANGED_IBSS |			\
	 BSS_CHANGED_BEACON_ENABLED)

	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_vif *avp = (void *)vif->drv_priv;
	int slottime;

	ath9k_ps_wakeup(sc);
	mutex_lock(&sc->mutex);

	if (changed & BSS_CHANGED_ASSOC) {
		ath_dbg(common, CONFIG, "BSSID %pM Changed ASSOC %d\n",
			bss_conf->bssid, bss_conf->assoc);

		memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
		avp->aid = bss_conf->aid;
		avp->assoc = bss_conf->assoc;

		ath9k_calculate_summary_state(sc, avp->chanctx);
	}

	if ((changed & BSS_CHANGED_IBSS) ||
	    (changed & BSS_CHANGED_OCB)) {
		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
		common->curaid = bss_conf->aid;
		ath9k_hw_write_associd(sc->sc_ah);
	}

	if ((changed & BSS_CHANGED_BEACON_ENABLED) ||
	    (changed & BSS_CHANGED_BEACON_INT) ||
	    (changed & BSS_CHANGED_BEACON_INFO)) {
		ath9k_beacon_config(sc, vif, changed);
		if (changed & BSS_CHANGED_BEACON_ENABLED)
			ath9k_calculate_summary_state(sc, avp->chanctx);
	}

	if ((avp->chanctx == sc->cur_chan) &&
	    (changed & BSS_CHANGED_ERP_SLOT)) {
		if (bss_conf->use_short_slot)
			slottime = 9;
		else
			slottime = 20;

		if (vif->type == NL80211_IFTYPE_AP) {
			/*
			 * Defer update, so that connected stations can adjust
			 * their settings at the same time.
			 * See beacon.c for more details
			 */
			sc->beacon.slottime = slottime;
			sc->beacon.updateslot = UPDATE;
		} else {
			ah->slottime = slottime;
			ath9k_hw_init_global_settings(ah);
		}
	}

	if (changed & BSS_CHANGED_P2P_PS)
		ath9k_p2p_bss_info_changed(sc, vif);

	if (changed & CHECK_ANI)
		ath_check_ani(sc);

	if (changed & BSS_CHANGED_TXPOWER) {
		ath_dbg(common, CONFIG, "vif %pM power %d dbm power_type %d\n",
			vif->addr, bss_conf->txpower, bss_conf->txpower_type);
		ath9k_set_txpower(sc, vif);
	}

	mutex_unlock(&sc->mutex);
	ath9k_ps_restore(sc);

#undef CHECK_ANI
}

static u64 ath9k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	u64 tsf;

	mutex_lock(&sc->mutex);
	ath9k_ps_wakeup(sc);
	tsf = ath9k_hw_gettsf64(sc->sc_ah);
	ath9k_ps_restore(sc);
	mutex_unlock(&sc->mutex);

	return tsf;
}

static void ath9k_set_tsf(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif,
			  u64 tsf)
{
	struct ath_softc *sc = hw->priv;

	mutex_lock(&sc->mutex);
	ath9k_ps_wakeup(sc);
	ath9k_hw_settsf64(sc->sc_ah, tsf);
	ath9k_ps_restore(sc);
	mutex_unlock(&sc->mutex);
}

static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;

	mutex_lock(&sc->mutex);
	ath9k_ps_wakeup(sc);
	ath9k_hw_reset_tsf(sc->sc_ah);
	ath9k_ps_restore(sc);
	mutex_unlock(&sc->mutex);
}
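
/*
 * mac80211 ampdu_action callback: RX aggregation needs no driver work here,
 * so only TX BA session start/stop/operational transitions are handled,
 * delegating to the ath_tx_aggr_* helpers.
 */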
static int ath9k_ampdu_action(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_ampdu_params *params)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	bool flush = false;
	int ret = 0;
	struct ieee80211_sta *sta = params->sta;
	enum ieee80211_ampdu_mlme_action action = params->action;
	u16 tid = params->tid;
	u16 *ssn = &params->ssn;

	mutex_lock(&sc->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		break;
	case IEEE80211_AMPDU_RX_STOP:
		break;
	case IEEE80211_AMPDU_TX_START:
		if (ath9k_is_chanctx_enabled()) {
			if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
				ret = -EBUSY;
				break;
			}
		}
		ath9k_ps_wakeup(sc);
		ret = ath_tx_aggr_start(sc, sta, tid, ssn);
		if (!ret)
			ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		ath9k_ps_restore(sc);
		break;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		flush = true;
		/* fall through */
	case IEEE80211_AMPDU_TX_STOP_CONT:
		ath9k_ps_wakeup(sc);
		ath_tx_aggr_stop(sc, sta, tid);
		if (!flush)
			ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		ath9k_ps_restore(sc);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ath9k_ps_wakeup(sc);
		ath_tx_aggr_resume(sc, sta, tid);
		ath9k_ps_restore(sc);
		break;
	default:
		ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
	}

	mutex_unlock(&sc->mutex);

	return ret;
}

static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
			    struct survey_info *survey)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	int pos;

	if (config_enabled(CONFIG_ATH9K_TX99))
		return -EOPNOTSUPP;

	spin_lock_bh(&common->cc_lock);
	if (idx == 0)
		ath_update_survey_stats(sc);

	sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
	if (sband && idx >= sband->n_channels) {
		idx -= sband->n_channels;
		sband = NULL;
	}

	if (!sband)
		sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];

	if (!sband || idx >= sband->n_channels) {
		spin_unlock_bh(&common->cc_lock);
		return -ENOENT;
	}

	chan = &sband->channels[idx];
	pos = chan->hw_value;
	memcpy(survey, &sc->survey[pos], sizeof(*survey));
	survey->channel = chan;
	spin_unlock_bh(&common->cc_lock);

	return 0;
}
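
/*
 * Enable dynamic ACK timeout estimation (CONFIG_ATH9K_DYNACK): reset its
 * state and reprogram the RX filter to match.
 */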
static void ath9k_enable_dynack(struct ath_softc *sc)
{
#ifdef CONFIG_ATH9K_DYNACK
	u32 rfilt;
	struct ath_hw *ah = sc->sc_ah;

	ath_dynack_reset(ah);

	ah->dynack.enabled = true;
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);
#endif
}

static void ath9k_set_coverage_class(struct ieee80211_hw *hw,
				     s16 coverage_class)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;

	if (config_enabled(CONFIG_ATH9K_TX99))
		return;

	mutex_lock(&sc->mutex);

	if (coverage_class >= 0) {
		ah->coverage_class = coverage_class;
		if (ah->dynack.enabled) {
			u32 rfilt;

			ah->dynack.enabled = false;
			rfilt = ath_calcrxfilter(sc);
			ath9k_hw_setrxfilter(ah, rfilt);
		}
		ath9k_ps_wakeup(sc);
		ath9k_hw_init_global_settings(ah);
		ath9k_ps_restore(sc);
	} else if (!ah->dynack.enabled) {
		ath9k_enable_dynack(sc);
	}

	mutex_unlock(&sc->mutex);
}

static bool ath9k_has_tx_pending(struct ath_softc *sc,
				 bool sw_pending)
{
	int i, npend = 0;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i],
						 sw_pending);
		if (npend)
			break;
	}

	return !!npend;
}

static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			u32 queues, bool drop)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (ath9k_is_chanctx_enabled()) {
		if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
			goto flush;

		/*
		 * If MCC is active, extend the flush timeout
		 * and wait for the HW/SW queues to become
		 * empty. This needs to be done outside the
		 * sc->mutex lock to allow the channel scheduler
		 * to switch channel contexts.
		 *
		 * The vif queues have been stopped in mac80211,
		 * so there won't be any incoming frames.
		 */
		__ath9k_flush(hw, queues, drop, true, true);
		return;
	}
flush:
	mutex_lock(&sc->mutex);
	__ath9k_flush(hw, queues, drop, true, false);
	mutex_unlock(&sc->mutex);
}
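
/*
 * Wait for pending TX to drain, up to the current channel's flush timeout
 * (or a fixed HZ/5 when timeout_override is set for MCC). If frames are
 * still pending and drop was requested, forcibly drain the hardware queues
 * and fall back to a chip reset if that fails.
 */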
void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
		   bool sw_pending, bool timeout_override)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	int timeout;
	bool drain_txq;

	cancel_delayed_work_sync(&sc->tx_complete_work);

	if (ah->ah_flags & AH_UNPLUGGED) {
		ath_dbg(common, ANY, "Device has been unplugged!\n");
		return;
	}

	if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
		ath_dbg(common, ANY, "Device not present\n");
		return;
	}

	spin_lock_bh(&sc->chan_lock);
	if (timeout_override)
		timeout = HZ / 5;
	else
		timeout = sc->cur_chan->flush_timeout;
	spin_unlock_bh(&sc->chan_lock);

	ath_dbg(common, CHAN_CTX,
		"Flush timeout: %d\n", jiffies_to_msecs(timeout));

	if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc, sw_pending),
			       timeout) > 0)
		drop = false;

	if (drop) {
		ath9k_ps_wakeup(sc);
		spin_lock_bh(&sc->sc_pcu_lock);
		drain_txq = ath_drain_all_txq(sc);
		spin_unlock_bh(&sc->sc_pcu_lock);

		if (!drain_txq)
			ath_reset(sc, NULL);

		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
}

static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;

	return ath9k_has_tx_pending(sc, true);
}
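
/*
 * Report whether the most recent beacon in slot 0 made it out. For non-EDMA
 * chips the beacon descriptor is polled once here (with the beacon tasklet
 * disabled) to refresh the cached TX status.
 */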
static int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_vif *vif;
	struct ath_vif *avp;
	struct ath_buf *bf;
	struct ath_tx_status ts;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int status;

	vif = sc->beacon.bslot[0];
	if (!vif)
		return 0;

	if (!vif->bss_conf.enable_beacon)
		return 0;

	avp = (void *)vif->drv_priv;

	if (!sc->beacon.tx_processed && !edma) {
		tasklet_disable(&sc->bcon_tasklet);

		bf = avp->av_bcbuf;
		if (!bf || !bf->bf_mpdu)
			goto skip;

		status = ath9k_hw_txprocdesc(ah, bf->bf_desc, &ts);
		if (status == -EINPROGRESS)
			goto skip;

		sc->beacon.tx_processed = true;
		sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);

skip:
		tasklet_enable(&sc->bcon_tasklet);
	}

	return sc->beacon.tx_last;
}

static int ath9k_get_stats(struct ieee80211_hw *hw,
			   struct ieee80211_low_level_stats *stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_mib_stats *mib_stats = &ah->ah_mibStats;

	stats->dot11ACKFailureCount = mib_stats->ackrcv_bad;
	stats->dot11RTSFailureCount = mib_stats->rts_bad;
	stats->dot11FCSErrorCount = mib_stats->fcs_bad;
	stats->dot11RTSSuccessCount = mib_stats->rts_good;

	return 0;
}
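
/*
 * Map a requested antenna bitmask onto the chains that actually exist in
 * the hardware capability mask: the n-th requested bit is assigned to the
 * n-th available chain.
 */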
static u32 fill_chainmask(u32 cap, u32 new)
{
	u32 filled = 0;
	int i;

	for (i = 0; cap && new; i++, cap >>= 1) {
		if (!(cap & BIT(0)))
			continue;

		if (new & BIT(0))
			filled |= BIT(i);

		new >>= 1;
	}

	return filled;
}

static bool validate_antenna_mask(struct ath_hw *ah, u32 val)
{
	if (AR_SREV_9300_20_OR_LATER(ah))
		return true;

	switch (val & 0x7) {
	case 0x1:
	case 0x3:
	case 0x7:
		return true;
	case 0x2:
		return (ah->caps.rx_chainmask == 1);
	default:
		return false;
	}
}

static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;

	if (ah->caps.rx_chainmask != 1)
		rx_ant |= tx_ant;

	if (!validate_antenna_mask(ah, rx_ant) || !tx_ant)
		return -EINVAL;

	sc->ant_rx = rx_ant;
	sc->ant_tx = tx_ant;

	if (ah->caps.rx_chainmask == 1)
		return 0;

	/* AR9100 runs into calibration issues if not all rx chains are enabled */
	if (AR_SREV_9100(ah))
		ah->rxchainmask = 0x7;
	else
		ah->rxchainmask = fill_chainmask(ah->caps.rx_chainmask, rx_ant);

	ah->txchainmask = fill_chainmask(ah->caps.tx_chainmask, tx_ant);
	ath9k_cmn_reload_chainmask(ah);

	return 0;
}

static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct ath_softc *sc = hw->priv;

	*tx_ant = sc->ant_tx;
	*rx_ant = sc->ant_rx;
	return 0;
}

static void ath9k_sw_scan_start(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				const u8 *mac_addr)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	set_bit(ATH_OP_SCANNING, &common->op_flags);
}

static void ath9k_sw_scan_complete(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	clear_bit(ATH_OP_SCANNING, &common->op_flags);
}

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT

static void ath9k_cancel_pending_offchannel(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (sc->offchannel.roc_vif) {
		ath_dbg(common, CHAN_CTX,
			"%s: Aborting RoC\n", __func__);

		del_timer_sync(&sc->offchannel.timer);
		if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
			ath_roc_complete(sc, ATH_ROC_COMPLETE_ABORT);
	}

	if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
		ath_dbg(common, CHAN_CTX,
			"%s: Aborting HW scan\n", __func__);

		del_timer_sync(&sc->offchannel.timer);
		ath_scan_complete(sc, true);
	}
}

static int ath9k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			 struct ieee80211_scan_request *hw_req)
{
	struct cfg80211_scan_request *req = &hw_req->req;
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int ret = 0;

	mutex_lock(&sc->mutex);

	if (WARN_ON(sc->offchannel.scan_req)) {
		ret = -EBUSY;
		goto out;
	}

	ath9k_ps_wakeup(sc);
	set_bit(ATH_OP_SCANNING, &common->op_flags);
	sc->offchannel.scan_vif = vif;
	sc->offchannel.scan_req = req;
	sc->offchannel.scan_idx = 0;

	ath_dbg(common, CHAN_CTX, "HW scan request received on vif: %pM\n",
		vif->addr);

	if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) {
		ath_dbg(common, CHAN_CTX, "Starting HW scan\n");
		ath_offchannel_next(sc);
	}

out:
	mutex_unlock(&sc->mutex);

	return ret;
}

static void ath9k_cancel_hw_scan(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	ath_dbg(common, CHAN_CTX, "Cancel HW scan on vif: %pM\n", vif->addr);

	mutex_lock(&sc->mutex);
	del_timer_sync(&sc->offchannel.timer);
	ath_scan_complete(sc, true);
	mutex_unlock(&sc->mutex);
}

static int ath9k_remain_on_channel(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_channel *chan, int duration,
				   enum ieee80211_roc_type type)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int ret = 0;

	mutex_lock(&sc->mutex);

	if (WARN_ON(sc->offchannel.roc_vif)) {
		ret = -EBUSY;
		goto out;
	}

	ath9k_ps_wakeup(sc);
	sc->offchannel.roc_vif = vif;
	sc->offchannel.roc_chan = chan;
	sc->offchannel.roc_duration = duration;

	ath_dbg(common, CHAN_CTX,
		"RoC request on vif: %pM, type: %d duration: %d\n",
		vif->addr, type, duration);

	if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) {
		ath_dbg(common, CHAN_CTX, "Starting RoC period\n");
		ath_offchannel_next(sc);
	}

out:
	mutex_unlock(&sc->mutex);

	return ret;
}

static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	mutex_lock(&sc->mutex);

	ath_dbg(common, CHAN_CTX, "Cancel RoC\n");
	del_timer_sync(&sc->offchannel.timer);

	if (sc->offchannel.roc_vif) {
		if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
			ath_roc_complete(sc, ATH_ROC_COMPLETE_CANCEL);
	}

	mutex_unlock(&sc->mutex);

	return 0;
}
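
/*
 * Channel context handling: each mac80211 chanctx is bound to a free slot
 * in sc->chanctx[], and every slot owns its own block of IEEE80211_NUM_ACS
 * hardware queues so VIFs on different contexts do not share TX queues.
 */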
static int ath9k_add_chanctx(struct ieee80211_hw *hw,
			     struct ieee80211_chanctx_conf *conf)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_chanctx *ctx, **ptr;
	int pos;

	mutex_lock(&sc->mutex);

	ath_for_each_chanctx(sc, ctx) {
		if (ctx->assigned)
			continue;

		ptr = (void *) conf->drv_priv;
		*ptr = ctx;
		ctx->assigned = true;
		pos = ctx - &sc->chanctx[0];
		ctx->hw_queue_base = pos * IEEE80211_NUM_ACS;

		ath_dbg(common, CHAN_CTX,
			"Add channel context: %d MHz\n",
			conf->def.chan->center_freq);

		ath_chanctx_set_channel(sc, ctx, &conf->def);

		mutex_unlock(&sc->mutex);
		return 0;
	}

	mutex_unlock(&sc->mutex);
	return -ENOSPC;
}

static void ath9k_remove_chanctx(struct ieee80211_hw *hw,
				 struct ieee80211_chanctx_conf *conf)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_chanctx *ctx = ath_chanctx_get(conf);

	mutex_lock(&sc->mutex);

	ath_dbg(common, CHAN_CTX,
		"Remove channel context: %d MHz\n",
		conf->def.chan->center_freq);

	ctx->assigned = false;
	ctx->hw_queue_base = 0;
	ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_UNASSIGN);

	mutex_unlock(&sc->mutex);
}

static void ath9k_change_chanctx(struct ieee80211_hw *hw,
				 struct ieee80211_chanctx_conf *conf,
				 u32 changed)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_chanctx *ctx = ath_chanctx_get(conf);

	mutex_lock(&sc->mutex);
	ath_dbg(common, CHAN_CTX,
		"Change channel context: %d MHz\n",
		conf->def.chan->center_freq);
	ath_chanctx_set_channel(sc, ctx, &conf->def);
	mutex_unlock(&sc->mutex);
}

static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_chanctx_conf *conf)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_vif *avp = (void *)vif->drv_priv;
	struct ath_chanctx *ctx = ath_chanctx_get(conf);
	int i;

	ath9k_cancel_pending_offchannel(sc);

	mutex_lock(&sc->mutex);

	ath_dbg(common, CHAN_CTX,
		"Assign VIF (addr: %pM, type: %d, p2p: %d) to channel context: %d MHz\n",
		vif->addr, vif->type, vif->p2p,
		conf->def.chan->center_freq);

	avp->chanctx = ctx;
	ctx->nvifs_assigned++;
	list_add_tail(&avp->list, &ctx->vifs);
	ath9k_calculate_summary_state(sc, ctx);
	for (i = 0; i < IEEE80211_NUM_ACS; i++)
		vif->hw_queue[i] = ctx->hw_queue_base + i;

	mutex_unlock(&sc->mutex);

	return 0;
}

static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_chanctx_conf *conf)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_vif *avp = (void *)vif->drv_priv;
	struct ath_chanctx *ctx = ath_chanctx_get(conf);
	int ac;

	ath9k_cancel_pending_offchannel(sc);

	mutex_lock(&sc->mutex);

	ath_dbg(common, CHAN_CTX,
		"Remove VIF (addr: %pM, type: %d, p2p: %d) from channel context: %d MHz\n",
		vif->addr, vif->type, vif->p2p,
		conf->def.chan->center_freq);

	avp->chanctx = NULL;
	ctx->nvifs_assigned--;
	list_del(&avp->list);
	ath9k_calculate_summary_state(sc, ctx);
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;

	mutex_unlock(&sc->mutex);
}
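
/*
 * Called by mac80211 before an authentication/association exchange on a
 * station VIF. When multi-channel operation is active and the VIF's context
 * is not currently on air, give an existing P2P GO context a chance to send
 * a NoA (waiting up to two beacon intervals), then force-switch to the
 * station's channel context so the management frames can go out.
 */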
static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_vif *avp = (struct ath_vif *) vif->drv_priv;
	struct ath_beacon_config *cur_conf;
	struct ath_chanctx *go_ctx;
	unsigned long timeout;
	bool changed = false;
	u32 beacon_int;

	if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
		return;

	if (!avp->chanctx)
		return;

	mutex_lock(&sc->mutex);

	spin_lock_bh(&sc->chan_lock);
	if (sc->next_chan || (sc->cur_chan != avp->chanctx))
		changed = true;
	spin_unlock_bh(&sc->chan_lock);

	if (!changed)
		goto out;

	ath9k_cancel_pending_offchannel(sc);

	go_ctx = ath_is_go_chanctx_present(sc);

	if (go_ctx) {
		/*
		 * Wait till the GO interface gets a chance
		 * to send out an NoA.
		 */
		spin_lock_bh(&sc->chan_lock);
		sc->sched.mgd_prepare_tx = true;
		cur_conf = &go_ctx->beacon;
		beacon_int = TU_TO_USEC(cur_conf->beacon_interval);
		spin_unlock_bh(&sc->chan_lock);

		timeout = usecs_to_jiffies(beacon_int * 2);
		init_completion(&sc->go_beacon);

		mutex_unlock(&sc->mutex);

		if (wait_for_completion_timeout(&sc->go_beacon,
						timeout) == 0) {
			ath_dbg(common, CHAN_CTX,
				"Failed to send new NoA\n");

			spin_lock_bh(&sc->chan_lock);
			sc->sched.mgd_prepare_tx = false;
			spin_unlock_bh(&sc->chan_lock);
		}

		mutex_lock(&sc->mutex);
	}

	ath_dbg(common, CHAN_CTX,
		"%s: Set chanctx state to FORCE_ACTIVE for vif: %pM\n",
		__func__, vif->addr);

	spin_lock_bh(&sc->chan_lock);
	sc->next_chan = avp->chanctx;
	sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE;
	spin_unlock_bh(&sc->chan_lock);

	ath_chanctx_set_next(sc, true);
out:
	mutex_unlock(&sc->mutex);
}

void ath9k_fill_chanctx_ops(void)
{
	if (!ath9k_is_chanctx_enabled())
		return;

	ath9k_ops.hw_scan = ath9k_hw_scan;
	ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
	ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
	ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
	ath9k_ops.add_chanctx = ath9k_add_chanctx;
	ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
	ath9k_ops.change_chanctx = ath9k_change_chanctx;
	ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
	ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
	ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
}

#endif
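
/*
 * Report the current TX power for a VIF. The internal cur_txpower value is
 * kept in half-dBm units, hence the divide by two before returning dBm to
 * mac80211.
 */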
static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     int *dbm)
{
	struct ath_softc *sc = hw->priv;
	struct ath_vif *avp = (void *)vif->drv_priv;

	mutex_lock(&sc->mutex);
	if (avp->chanctx)
		*dbm = avp->chanctx->cur_txpower;
	else
		*dbm = sc->cur_chan->cur_txpower;
	mutex_unlock(&sc->mutex);

	*dbm /= 2;

	return 0;
}

struct ieee80211_ops ath9k_ops = {
	.tx = ath9k_tx,
	.start = ath9k_start,
	.stop = ath9k_stop,
	.add_interface = ath9k_add_interface,
	.change_interface = ath9k_change_interface,
	.remove_interface = ath9k_remove_interface,
	.config = ath9k_config,
	.configure_filter = ath9k_configure_filter,
	.sta_state = ath9k_sta_state,
	.sta_notify = ath9k_sta_notify,
	.conf_tx = ath9k_conf_tx,
	.bss_info_changed = ath9k_bss_info_changed,
	.set_key = ath9k_set_key,
	.get_tsf = ath9k_get_tsf,
	.set_tsf = ath9k_set_tsf,
	.reset_tsf = ath9k_reset_tsf,
	.ampdu_action = ath9k_ampdu_action,
	.get_survey = ath9k_get_survey,
	.rfkill_poll = ath9k_rfkill_poll_state,
	.set_coverage_class = ath9k_set_coverage_class,
	.flush = ath9k_flush,
	.tx_frames_pending = ath9k_tx_frames_pending,
	.tx_last_beacon = ath9k_tx_last_beacon,
	.release_buffered_frames = ath9k_release_buffered_frames,
	.get_stats = ath9k_get_stats,
	.set_antenna = ath9k_set_antenna,
	.get_antenna = ath9k_get_antenna,

#ifdef CONFIG_ATH9K_WOW
	.suspend = ath9k_suspend,
	.resume = ath9k_resume,
	.set_wakeup = ath9k_set_wakeup,
#endif

#ifdef CONFIG_ATH9K_DEBUGFS
	.get_et_sset_count = ath9k_get_et_sset_count,
	.get_et_stats = ath9k_get_et_stats,
	.get_et_strings = ath9k_get_et_strings,
#endif

#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_STATION_STATISTICS)
	.sta_add_debugfs = ath9k_sta_add_debugfs,
#endif
	.sw_scan_start = ath9k_sw_scan_start,
	.sw_scan_complete = ath9k_sw_scan_complete,
	.get_txpower = ath9k_get_txpower,
};