/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/ctype.h>

#include "core.h"
#include "htc.h"
#include "debug.h"
#include "wmi.h"
#include "mac.h"
#include "testmode.h"
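
/*
 * Each firmware branch (main, 10.X, 10.2) numbers its WMI commands and
 * parameters differently. The maps below translate the driver's abstract
 * identifiers into the branch-specific values; entries set to
 * WMI_CMD_UNSUPPORTED or WMI_*_PARAM_UNSUPPORTED mark facilities the given
 * firmware does not provide, and ath10k_wmi_cmd_send() refuses to send those.
 */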
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
	.init_cmdid = WMI_INIT_CMDID,
	.start_scan_cmdid = WMI_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
		WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
	.echo_cmdid = WMI_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
};

/* 10.X WMI cmd track */
static struct wmi_cmd_map wmi_10x_cmd_map = {
	.init_cmdid = WMI_10X_INIT_CMDID,
	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
		WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
		WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
		WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10X_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
};

/* MAIN WMI VDEV param map */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_VDEV_PARAM_WDS,
	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_VDEV_PARAM_SGI,
	.ldpc = WMI_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_VDEV_PARAM_UNSUPPORTED,
};

/* 10.X WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
		WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
};
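
/* MAIN WMI PDEV param map */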
static struct wmi_pdev_param_map wmi_pdev_param_map = {
	.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
	.pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
	.dcs = WMI_PDEV_PARAM_DCS,
	.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
	.idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
	.power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
	.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
};
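
/* 10.X WMI PDEV param map */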
static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
		WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
	.dcs = WMI_10X_PDEV_PARAM_DCS,
	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
};

/* firmware 10.2 specific mappings */
static struct wmi_cmd_map wmi_10_2_cmd_map = {
	.init_cmdid = WMI_10_2_INIT_CMDID,
	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
		WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
		WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
		WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
		WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
		WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
		WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
		WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
		WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
		WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_2_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
};
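
/*
 * Wait (with a timeout) for the completions that the WMI event handlers
 * signal once firmware reports "service ready" and "unified ready". A zero
 * return from wait_for_completion_timeout() means the event never arrived
 * within the timeout.
 */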
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
	int ret;

	ret = wait_for_completion_timeout(&ar->wmi.service_ready,
					  WMI_SERVICE_READY_TIMEOUT_HZ);
	return ret;
}

int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
{
	int ret;

	ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
					  WMI_UNIFIED_READY_TIMEOUT_HZ);
	return ret;
}
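
/*
 * Allocate an skb for a WMI command: headroom for the HTC/WMI headers is
 * reserved and the requested length is rounded up to a multiple of 4 so the
 * zeroed payload stays 32-bit aligned.
 */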
struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
{
	struct sk_buff *skb;
	u32 round_len = roundup(len, 4);

	skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "Unaligned WMI skb\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}

static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}
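
/*
 * Push a wmi_cmd_hdr in front of the payload and hand the buffer to HTC.
 * No credit waiting happens here; on failure the header is pulled back off
 * so the caller can retry with the same skb.
 */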
static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;
	u32 cmd = 0;

	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return -ENOMEM;

	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = __cpu_to_le32(cmd);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}
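
/*
 * Queue a pending beacon to firmware by reference from the TX-credit path.
 * A beacon that is already in flight (beacon_sent) is not queued again, and
 * the arvif->beacon skb is kept around for later DMA unmap and free.
 */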
static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
{
	int ret;

	lockdep_assert_held(&arvif->ar->data_lock);

	if (arvif->beacon == NULL)
		return;

	if (arvif->beacon_sent)
		return;

	ret = ath10k_wmi_beacon_send_ref_nowait(arvif);
	if (ret)
		return;

	/* We need to retain the arvif->beacon reference for DMA unmapping and
	 * freeing the skbuff later. */
	arvif->beacon_sent = true;
}

static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);

	ath10k_wmi_tx_beacon_nowait(arvif);
}

static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
{
	spin_lock_bh(&ar->data_lock);
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   ath10k_wmi_tx_beacons_iter,
						   NULL);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
{
	/* try to send pending beacons first. they take priority */
	ath10k_wmi_tx_beacons_nowait(ar);

	wake_up(&ar->wmi.tx_credits_wq);
}
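
/*
 * Send a WMI command, sleeping up to 3*HZ for HTC TX credits when the
 * immediate attempt returns -EAGAIN. Pending beacons are flushed on every
 * retry since they take priority over ordinary commands; on failure the skb
 * is freed here.
 */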
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
{
	int ret = -EOPNOTSUPP;

	might_sleep();

	if (cmd_id == WMI_CMD_UNSUPPORTED) {
		ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
			    cmd_id);
		return ret;
	}

	wait_event_timeout(ar->wmi.tx_credits_wq, ({
		/* try to send pending beacons first. they take priority */
		ath10k_wmi_tx_beacons_nowait(ar);

		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
		(ret != -EAGAIN);
	}), 3*HZ);

	if (ret)
		dev_kfree_skb_any(skb);

	return ret;
}
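
/*
 * Transmit a management frame via the WMI mgmt_tx command. Protected
 * action/deauth/disassoc frames reserve extra room for the CCMP MIC. TX
 * status is not reported by the target here (see the TODO below), so the
 * frame is simply acknowledged back to mac80211.
 */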
int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
{
	int ret = 0;
	struct wmi_mgmt_tx_cmd *cmd;
	struct ieee80211_hdr *hdr;
	struct sk_buff *wmi_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int len;
	u32 buf_len = skb->len;
	u16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
		return -EINVAL;

	len = sizeof(cmd->hdr) + skb->len;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		len += IEEE80211_CCMP_MIC_LEN;
		buf_len += IEEE80211_CCMP_MIC_LEN;
	}

	len = round_up(len, 4);

	wmi_skb = ath10k_wmi_alloc_skb(ar, len);
	if (!wmi_skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;

	cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
	cmd->hdr.tx_rate = 0;
	cmd->hdr.tx_power = 0;
	cmd->hdr.buf_len = __cpu_to_le32(buf_len);

	ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
	memcpy(cmd->buf, skb->data, skb->len);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
		   wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
		   fc & IEEE80211_FCTL_STYPE);

	/* Send the management frame buffer to the target */
	ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* TODO: report tx status to mac80211 - temporary just ACK */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, skb);

	return ret;
}
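
/*
 * The handlers below drive the host-side scan state machine
 * (ar->scan.state). Events that arrive in a state where they make no sense
 * are logged and ignored; as noted in ath10k_wmi_event_scan_completed(),
 * this can happen when firmware fails to deliver every scan event.
 */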
static void ath10k_wmi_event_scan_started(struct ath10k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
			    ath10k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH10K_SCAN_STARTING:
		ar->scan.state = ATH10K_SCAN_RUNNING;

		if (ar->scan.is_roc)
			ieee80211_ready_on_channel(ar->hw);

		complete(&ar->scan.started);
		break;
	}
}

static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_STARTING:
		/* One suspected reason scan can be completed while starting is
		 * if firmware fails to deliver all scan events to the host,
		 * e.g. when transport pipe is full. This has been observed
		 * with spectral scan phyerr events starving wmi transport
		 * pipe. In such case the "scan completed" event should be (and
		 * is) ignored by the host as it may be just firmware's scan
		 * state machine recovering.
		 */
		ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
			    ath10k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		__ath10k_scan_finish(ar);
		break;
	}
}
  799. static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
  800. {
  801. lockdep_assert_held(&ar->data_lock);
  802. switch (ar->scan.state) {
  803. case ATH10K_SCAN_IDLE:
  804. case ATH10K_SCAN_STARTING:
  805. ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
  806. ath10k_scan_state_str(ar->scan.state),
  807. ar->scan.state);
  808. break;
  809. case ATH10K_SCAN_RUNNING:
  810. case ATH10K_SCAN_ABORTING:
  811. ar->scan_channel = NULL;
  812. break;
  813. }
  814. }
  815. static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
  816. {
  817. lockdep_assert_held(&ar->data_lock);
  818. switch (ar->scan.state) {
  819. case ATH10K_SCAN_IDLE:
  820. case ATH10K_SCAN_STARTING:
  821. ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
  822. ath10k_scan_state_str(ar->scan.state),
  823. ar->scan.state);
  824. break;
  825. case ATH10K_SCAN_RUNNING:
  826. case ATH10K_SCAN_ABORTING:
  827. ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
  828. if (ar->scan.is_roc && ar->scan.roc_freq == freq)
  829. complete(&ar->scan.on_channel);
  830. break;
  831. }
  832. }
  833. static const char *
  834. ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
  835. enum wmi_scan_completion_reason reason)
  836. {
  837. switch (type) {
  838. case WMI_SCAN_EVENT_STARTED:
  839. return "started";
  840. case WMI_SCAN_EVENT_COMPLETED:
  841. switch (reason) {
  842. case WMI_SCAN_REASON_COMPLETED:
  843. return "completed";
  844. case WMI_SCAN_REASON_CANCELLED:
  845. return "completed [cancelled]";
  846. case WMI_SCAN_REASON_PREEMPTED:
  847. return "completed [preempted]";
  848. case WMI_SCAN_REASON_TIMEDOUT:
  849. return "completed [timedout]";
  850. case WMI_SCAN_REASON_MAX:
  851. break;
  852. }
  853. return "completed [unknown]";
  854. case WMI_SCAN_EVENT_BSS_CHANNEL:
  855. return "bss channel";
  856. case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
  857. return "foreign channel";
  858. case WMI_SCAN_EVENT_DEQUEUED:
  859. return "dequeued";
  860. case WMI_SCAN_EVENT_PREEMPTED:
  861. return "preempted";
  862. case WMI_SCAN_EVENT_START_FAILED:
  863. return "start failed";
  864. default:
  865. return "unknown";
  866. }
  867. }
  868. static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
  869. {
  870. struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
  871. enum wmi_scan_event_type event_type;
  872. enum wmi_scan_completion_reason reason;
  873. u32 freq;
  874. u32 req_id;
  875. u32 scan_id;
  876. u32 vdev_id;
  877. event_type = __le32_to_cpu(event->event_type);
  878. reason = __le32_to_cpu(event->reason);
  879. freq = __le32_to_cpu(event->channel_freq);
  880. req_id = __le32_to_cpu(event->scan_req_id);
  881. scan_id = __le32_to_cpu(event->scan_id);
  882. vdev_id = __le32_to_cpu(event->vdev_id);
  883. spin_lock_bh(&ar->data_lock);
  884. ath10k_dbg(ar, ATH10K_DBG_WMI,
  885. "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
  886. ath10k_wmi_event_scan_type_str(event_type, reason),
  887. event_type, reason, freq, req_id, scan_id, vdev_id,
  888. ath10k_scan_state_str(ar->scan.state), ar->scan.state);
  889. switch (event_type) {
  890. case WMI_SCAN_EVENT_STARTED:
  891. ath10k_wmi_event_scan_started(ar);
  892. break;
  893. case WMI_SCAN_EVENT_COMPLETED:
  894. ath10k_wmi_event_scan_completed(ar);
  895. break;
  896. case WMI_SCAN_EVENT_BSS_CHANNEL:
  897. ath10k_wmi_event_scan_bss_chan(ar);
  898. break;
  899. case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
  900. ath10k_wmi_event_scan_foreign_chan(ar, freq);
  901. break;
  902. case WMI_SCAN_EVENT_START_FAILED:
  903. ath10k_warn(ar, "received scan start failure event\n");
  904. break;
  905. case WMI_SCAN_EVENT_DEQUEUED:
  906. case WMI_SCAN_EVENT_PREEMPTED:
  907. default:
  908. break;
  909. }
  910. spin_unlock_bh(&ar->data_lock);
  911. return 0;
  912. }
  913. static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
  914. {
  915. enum ieee80211_band band;
  916. switch (phy_mode) {
  917. case MODE_11A:
  918. case MODE_11NA_HT20:
  919. case MODE_11NA_HT40:
  920. case MODE_11AC_VHT20:
  921. case MODE_11AC_VHT40:
  922. case MODE_11AC_VHT80:
  923. band = IEEE80211_BAND_5GHZ;
  924. break;
  925. case MODE_11G:
  926. case MODE_11B:
  927. case MODE_11GONLY:
  928. case MODE_11NG_HT20:
  929. case MODE_11NG_HT40:
  930. case MODE_11AC_VHT20_2G:
  931. case MODE_11AC_VHT40_2G:
  932. case MODE_11AC_VHT80_2G:
  933. default:
  934. band = IEEE80211_BAND_2GHZ;
  935. }
  936. return band;
  937. }
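/* Map the firmware-reported rate (in kbps) to a mac80211 legacy rate index:
 * CCK 1/2/5.5/11 Mbps occupy indices 0-3 and OFDM 6-54 Mbps indices 4-11.
 * The 5 GHz rate table has no CCK entries, so OFDM indices are shifted down
 * by 4 there (e.g. 6000 kbps maps to index 4 on 2.4 GHz but index 0 on 5 GHz).
 */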
  938. static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
  939. {
  940. u8 rate_idx = 0;
  941. /* rate in Kbps */
  942. switch (rate) {
  943. case 1000:
  944. rate_idx = 0;
  945. break;
  946. case 2000:
  947. rate_idx = 1;
  948. break;
  949. case 5500:
  950. rate_idx = 2;
  951. break;
  952. case 11000:
  953. rate_idx = 3;
  954. break;
  955. case 6000:
  956. rate_idx = 4;
  957. break;
  958. case 9000:
  959. rate_idx = 5;
  960. break;
  961. case 12000:
  962. rate_idx = 6;
  963. break;
  964. case 18000:
  965. rate_idx = 7;
  966. break;
  967. case 24000:
  968. rate_idx = 8;
  969. break;
  970. case 36000:
  971. rate_idx = 9;
  972. break;
  973. case 48000:
  974. rate_idx = 10;
  975. break;
  976. case 54000:
  977. rate_idx = 11;
  978. break;
  979. default:
  980. break;
  981. }
  982. if (band == IEEE80211_BAND_5GHZ) {
  983. if (rate_idx > 3)
  984. /* Omit CCK rates */
  985. rate_idx -= 4;
  986. else
  987. rate_idx = 0;
  988. }
  989. return rate_idx;
  990. }
  991. static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
  992. {
  993. struct wmi_mgmt_rx_event_v1 *ev_v1;
  994. struct wmi_mgmt_rx_event_v2 *ev_v2;
  995. struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
  996. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  997. struct ieee80211_channel *ch;
  998. struct ieee80211_hdr *hdr;
  999. u32 rx_status;
  1000. u32 channel;
  1001. u32 phy_mode;
  1002. u32 snr;
  1003. u32 rate;
  1004. u32 buf_len;
  1005. u16 fc;
  1006. int pull_len;
  1007. if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
  1008. ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
  1009. ev_hdr = &ev_v2->hdr.v1;
  1010. pull_len = sizeof(*ev_v2);
  1011. } else {
  1012. ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
  1013. ev_hdr = &ev_v1->hdr;
  1014. pull_len = sizeof(*ev_v1);
  1015. }
  1016. channel = __le32_to_cpu(ev_hdr->channel);
  1017. buf_len = __le32_to_cpu(ev_hdr->buf_len);
  1018. rx_status = __le32_to_cpu(ev_hdr->status);
  1019. snr = __le32_to_cpu(ev_hdr->snr);
  1020. phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
  1021. rate = __le32_to_cpu(ev_hdr->rate);
  1022. memset(status, 0, sizeof(*status));
  1023. ath10k_dbg(ar, ATH10K_DBG_MGMT,
  1024. "event mgmt rx status %08x\n", rx_status);
  1025. if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
  1026. dev_kfree_skb(skb);
  1027. return 0;
  1028. }
  1029. if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
  1030. dev_kfree_skb(skb);
  1031. return 0;
  1032. }
  1033. if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
  1034. dev_kfree_skb(skb);
  1035. return 0;
  1036. }
  1037. if (rx_status & WMI_RX_STATUS_ERR_CRC)
  1038. status->flag |= RX_FLAG_FAILED_FCS_CRC;
  1039. if (rx_status & WMI_RX_STATUS_ERR_MIC)
  1040. status->flag |= RX_FLAG_MMIC_ERROR;
  1041. /* HW can Rx CCK rates on 5GHz. In that case phy_mode is set to
  1042. * MODE_11B. This means phy_mode is not a reliable source for the band
  1043. * of mgmt rx. */
  1044. ch = ar->scan_channel;
  1045. if (!ch)
  1046. ch = ar->rx_channel;
  1047. if (ch) {
  1048. status->band = ch->band;
  1049. if (phy_mode == MODE_11B &&
  1050. status->band == IEEE80211_BAND_5GHZ)
  1051. ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
  1052. } else {
  1053. ath10k_warn(ar, "using (unreliable) phy_mode to extract band for mgmt rx\n");
  1054. status->band = phy_mode_to_band(phy_mode);
  1055. }
  1056. status->freq = ieee80211_channel_to_frequency(channel, status->band);
  1057. status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
  1058. status->rate_idx = get_rate_idx(rate, status->band);
  1059. skb_pull(skb, pull_len);
  1060. hdr = (struct ieee80211_hdr *)skb->data;
  1061. fc = le16_to_cpu(hdr->frame_control);
  1062. /* FW delivers WEP Shared Auth frame with Protected Bit set and
  1063. * encrypted payload. However in case of PMF it delivers decrypted
  1064. * frames with Protected Bit set. */
  1065. if (ieee80211_has_protected(hdr->frame_control) &&
  1066. !ieee80211_is_auth(hdr->frame_control)) {
  1067. status->flag |= RX_FLAG_DECRYPTED;
  1068. if (!ieee80211_is_action(hdr->frame_control) &&
  1069. !ieee80211_is_deauth(hdr->frame_control) &&
  1070. !ieee80211_is_disassoc(hdr->frame_control)) {
  1071. status->flag |= RX_FLAG_IV_STRIPPED |
  1072. RX_FLAG_MMIC_STRIPPED;
  1073. hdr->frame_control = __cpu_to_le16(fc &
  1074. ~IEEE80211_FCTL_PROTECTED);
  1075. }
  1076. }
  1077. ath10k_dbg(ar, ATH10K_DBG_MGMT,
  1078. "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
  1079. skb, skb->len,
  1080. fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
  1081. ath10k_dbg(ar, ATH10K_DBG_MGMT,
  1082. "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
  1083. status->freq, status->band, status->signal,
  1084. status->rate_idx);
  1085. /*
1086. * packets from HTC arrive aligned to 4-byte boundaries
1087. * because they may originally have been delivered along with a trailer
  1088. */
  1089. skb_trim(skb, buf_len);
  1090. ieee80211_rx(ar->hw, skb);
  1091. return 0;
  1092. }
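/* Convert a channel center frequency into a flat index across all wiphy
 * bands; this index addresses the ar->survey[] array used by the chan info
 * handler below.
 */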
  1093. static int freq_to_idx(struct ath10k *ar, int freq)
  1094. {
  1095. struct ieee80211_supported_band *sband;
  1096. int band, ch, idx = 0;
  1097. for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
  1098. sband = ar->hw->wiphy->bands[band];
  1099. if (!sband)
  1100. continue;
  1101. for (ch = 0; ch < sband->n_channels; ch++, idx++)
  1102. if (sband->channels[ch].center_freq == freq)
  1103. goto exit;
  1104. }
  1105. exit:
  1106. return idx;
  1107. }
  1108. static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
  1109. {
  1110. struct wmi_chan_info_event *ev;
  1111. struct survey_info *survey;
  1112. u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
  1113. int idx;
  1114. ev = (struct wmi_chan_info_event *)skb->data;
  1115. err_code = __le32_to_cpu(ev->err_code);
  1116. freq = __le32_to_cpu(ev->freq);
  1117. cmd_flags = __le32_to_cpu(ev->cmd_flags);
  1118. noise_floor = __le32_to_cpu(ev->noise_floor);
  1119. rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
  1120. cycle_count = __le32_to_cpu(ev->cycle_count);
  1121. ath10k_dbg(ar, ATH10K_DBG_WMI,
  1122. "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
  1123. err_code, freq, cmd_flags, noise_floor, rx_clear_count,
  1124. cycle_count);
  1125. spin_lock_bh(&ar->data_lock);
  1126. switch (ar->scan.state) {
  1127. case ATH10K_SCAN_IDLE:
  1128. case ATH10K_SCAN_STARTING:
  1129. ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
  1130. goto exit;
  1131. case ATH10K_SCAN_RUNNING:
  1132. case ATH10K_SCAN_ABORTING:
  1133. break;
  1134. }
  1135. idx = freq_to_idx(ar, freq);
  1136. if (idx >= ARRAY_SIZE(ar->survey)) {
  1137. ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
  1138. freq, idx);
  1139. goto exit;
  1140. }
  1141. if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
1142. /* During scanning, chan info is reported twice for each
1143. * visited channel. The reported cycle count is global,
1144. * so the per-channel cycle count must be calculated as a delta */
  1145. cycle_count -= ar->survey_last_cycle_count;
  1146. rx_clear_count -= ar->survey_last_rx_clear_count;
  1147. survey = &ar->survey[idx];
  1148. survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count);
  1149. survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
  1150. survey->noise = noise_floor;
  1151. survey->filled = SURVEY_INFO_CHANNEL_TIME |
  1152. SURVEY_INFO_CHANNEL_TIME_RX |
  1153. SURVEY_INFO_NOISE_DBM;
  1154. }
  1155. ar->survey_last_rx_clear_count = rx_clear_count;
  1156. ar->survey_last_cycle_count = cycle_count;
  1157. exit:
  1158. spin_unlock_bh(&ar->data_lock);
  1159. }
  1160. static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
  1161. {
  1162. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
  1163. }
  1164. static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
  1165. {
  1166. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
  1167. skb->len);
  1168. trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
  1169. return 0;
  1170. }
  1171. static void ath10k_wmi_event_update_stats(struct ath10k *ar,
  1172. struct sk_buff *skb)
  1173. {
  1174. struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;
  1175. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
  1176. ath10k_debug_read_target_stats(ar, ev);
  1177. }
  1178. static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
  1179. struct sk_buff *skb)
  1180. {
  1181. struct wmi_vdev_start_response_event *ev;
  1182. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
  1183. ev = (struct wmi_vdev_start_response_event *)skb->data;
  1184. if (WARN_ON(__le32_to_cpu(ev->status)))
  1185. return;
  1186. complete(&ar->vdev_setup_done);
  1187. }
  1188. static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
  1189. struct sk_buff *skb)
  1190. {
  1191. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
  1192. complete(&ar->vdev_setup_done);
  1193. }
  1194. static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
  1195. struct sk_buff *skb)
  1196. {
  1197. struct wmi_peer_sta_kickout_event *ev;
  1198. struct ieee80211_sta *sta;
  1199. ev = (struct wmi_peer_sta_kickout_event *)skb->data;
  1200. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
  1201. ev->peer_macaddr.addr);
  1202. rcu_read_lock();
  1203. sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL);
  1204. if (!sta) {
  1205. ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
  1206. ev->peer_macaddr.addr);
  1207. goto exit;
  1208. }
  1209. ieee80211_report_low_ack(sta, 10);
  1210. exit:
  1211. rcu_read_unlock();
  1212. }
  1213. /*
  1214. * FIXME
  1215. *
  1216. * We don't report to mac80211 sleep state of connected
  1217. * stations. Due to this mac80211 can't fill in TIM IE
  1218. * correctly.
  1219. *
  1220. * I know of no way of getting nullfunc frames that contain
  1221. * sleep transition from connected stations - these do not
  1222. * seem to be sent from the target to the host. There also
  1223. * doesn't seem to be a dedicated event for that. So the
  1224. * only way left to do this would be to read tim_bitmap
  1225. * during SWBA.
  1226. *
  1227. * We could probably try using tim_bitmap from SWBA to tell
  1228. * mac80211 which stations are asleep and which are not. The
  1229. * problem here is calling mac80211 functions so many times
  1230. * could take too long and make us miss the time to submit
  1231. * the beacon to the target.
  1232. *
  1233. * So as a workaround we try to extend the TIM IE if there
  1234. * is unicast buffered for stations with aid > 7 and fill it
  1235. * in ourselves.
  1236. */
  1237. static void ath10k_wmi_update_tim(struct ath10k *ar,
  1238. struct ath10k_vif *arvif,
  1239. struct sk_buff *bcn,
  1240. struct wmi_bcn_info *bcn_info)
  1241. {
  1242. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
  1243. struct ieee80211_tim_ie *tim;
  1244. u8 *ies, *ie;
  1245. u8 ie_len, pvm_len;
  1246. __le32 t;
  1247. u32 v;
1248. /* if the next SWBA has tim_changed unset, its tim_bitmap is garbage;
1249. * we must copy the bitmap whenever it changes and reuse it later */
  1250. if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
  1251. int i;
  1252. BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
  1253. sizeof(bcn_info->tim_info.tim_bitmap));
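/* the firmware reports the bitmap as __le32 words; unpack it byte by
 * byte (least significant byte first) into the cached per-vif array */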
  1254. for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
  1255. t = bcn_info->tim_info.tim_bitmap[i / 4];
  1256. v = __le32_to_cpu(t);
  1257. arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
  1258. }
  1259. /* FW reports either length 0 or 16
  1260. * so we calculate this on our own */
  1261. arvif->u.ap.tim_len = 0;
  1262. for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
  1263. if (arvif->u.ap.tim_bitmap[i])
  1264. arvif->u.ap.tim_len = i;
  1265. arvif->u.ap.tim_len++;
  1266. }
  1267. ies = bcn->data;
  1268. ies += ieee80211_hdrlen(hdr->frame_control);
  1269. ies += 12; /* fixed parameters */
  1270. ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
  1271. (u8 *)skb_tail_pointer(bcn) - ies);
  1272. if (!ie) {
  1273. if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
1274. ath10k_warn(ar, "no tim ie found\n");
  1275. return;
  1276. }
  1277. tim = (void *)ie + 2;
  1278. ie_len = ie[1];
  1279. pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
  1280. if (pvm_len < arvif->u.ap.tim_len) {
  1281. int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
  1282. int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
  1283. void *next_ie = ie + 2 + ie_len;
  1284. if (skb_put(bcn, expand_size)) {
  1285. memmove(next_ie + expand_size, next_ie, move_size);
  1286. ie[1] += expand_size;
  1287. ie_len += expand_size;
  1288. pvm_len += expand_size;
  1289. } else {
  1290. ath10k_warn(ar, "tim expansion failed\n");
  1291. }
  1292. }
  1293. if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
  1294. ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
  1295. return;
  1296. }
  1297. tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
  1298. memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
  1299. if (tim->dtim_count == 0) {
  1300. ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
  1301. if (__le32_to_cpu(bcn_info->tim_info.tim_mcast) == 1)
  1302. ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
  1303. }
  1304. ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
  1305. tim->dtim_count, tim->dtim_period,
  1306. tim->bitmap_ctrl, pvm_len);
  1307. }
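/* Build a WFA vendor-specific P2P IE containing a single Notice of Absence
 * attribute from the firmware-supplied NoA info: EID, length, WFA OUI and
 * OUI type, followed by the NoA attribute header and its descriptors.
 */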
  1308. static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
  1309. struct wmi_p2p_noa_info *noa)
  1310. {
  1311. struct ieee80211_p2p_noa_attr *noa_attr;
  1312. u8 ctwindow_oppps = noa->ctwindow_oppps;
  1313. u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
  1314. bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
  1315. __le16 *noa_attr_len;
  1316. u16 attr_len;
  1317. u8 noa_descriptors = noa->num_descriptors;
  1318. int i;
  1319. /* P2P IE */
  1320. data[0] = WLAN_EID_VENDOR_SPECIFIC;
  1321. data[1] = len - 2;
  1322. data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
  1323. data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
  1324. data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
  1325. data[5] = WLAN_OUI_TYPE_WFA_P2P;
  1326. /* NOA ATTR */
  1327. data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
  1328. noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
  1329. noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
  1330. noa_attr->index = noa->index;
  1331. noa_attr->oppps_ctwindow = ctwindow;
  1332. if (oppps)
  1333. noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
  1334. for (i = 0; i < noa_descriptors; i++) {
  1335. noa_attr->desc[i].count =
  1336. __le32_to_cpu(noa->descriptors[i].type_count);
  1337. noa_attr->desc[i].duration = noa->descriptors[i].duration;
  1338. noa_attr->desc[i].interval = noa->descriptors[i].interval;
  1339. noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
  1340. }
  1341. attr_len = 2; /* index + oppps_ctwindow */
  1342. attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
  1343. *noa_attr_len = __cpu_to_le16(attr_len);
  1344. }
  1345. static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
  1346. {
  1347. u32 len = 0;
  1348. u8 noa_descriptors = noa->num_descriptors;
  1349. u8 opp_ps_info = noa->ctwindow_oppps;
  1350. bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
  1351. if (!noa_descriptors && !opps_enabled)
  1352. return len;
  1353. len += 1 + 1 + 4; /* EID + len + OUI */
  1354. len += 1 + 2; /* noa attr + attr len */
  1355. len += 1 + 1; /* index + oppps_ctwindow */
  1356. len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
  1357. return len;
  1358. }
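/* For P2P GO vdevs, rebuild the cached NoA IE whenever the firmware flags a
 * change and append the cached IE to the beacon template.
 */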
  1359. static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
  1360. struct sk_buff *bcn,
  1361. struct wmi_bcn_info *bcn_info)
  1362. {
  1363. struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
  1364. u8 *new_data, *old_data = arvif->u.ap.noa_data;
  1365. u32 new_len;
  1366. if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
  1367. return;
  1368. ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
  1369. if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
  1370. new_len = ath10k_p2p_calc_noa_ie_len(noa);
  1371. if (!new_len)
  1372. goto cleanup;
  1373. new_data = kmalloc(new_len, GFP_ATOMIC);
  1374. if (!new_data)
  1375. goto cleanup;
  1376. ath10k_p2p_fill_noa_ie(new_data, new_len, noa);
  1377. spin_lock_bh(&ar->data_lock);
  1378. arvif->u.ap.noa_data = new_data;
  1379. arvif->u.ap.noa_len = new_len;
  1380. spin_unlock_bh(&ar->data_lock);
  1381. kfree(old_data);
  1382. }
  1383. if (arvif->u.ap.noa_data)
  1384. if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
  1385. memcpy(skb_put(bcn, arvif->u.ap.noa_len),
  1386. arvif->u.ap.noa_data,
  1387. arvif->u.ap.noa_len);
  1388. return;
  1389. cleanup:
  1390. spin_lock_bh(&ar->data_lock);
  1391. arvif->u.ap.noa_data = NULL;
  1392. arvif->u.ap.noa_len = 0;
  1393. spin_unlock_bh(&ar->data_lock);
  1394. kfree(old_data);
  1395. }
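/* SWBA (software beacon alert): for every vdev set in the event's vdev_map
 * the firmware expects the host to provide the next beacon. The handler
 * fetches a beacon from mac80211, patches the TIM and P2P NoA IEs, DMA-maps
 * it and queues it for transmission.
 */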
  1396. static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
  1397. {
  1398. struct wmi_host_swba_event *ev;
  1399. u32 map;
  1400. int i = -1;
  1401. struct wmi_bcn_info *bcn_info;
  1402. struct ath10k_vif *arvif;
  1403. struct sk_buff *bcn;
  1404. int ret, vdev_id = 0;
  1405. ev = (struct wmi_host_swba_event *)skb->data;
  1406. map = __le32_to_cpu(ev->vdev_map);
  1407. ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
1408. map);
  1409. for (; map; map >>= 1, vdev_id++) {
  1410. if (!(map & 0x1))
  1411. continue;
  1412. i++;
  1413. if (i >= WMI_MAX_AP_VDEV) {
  1414. ath10k_warn(ar, "swba has corrupted vdev map\n");
  1415. break;
  1416. }
  1417. bcn_info = &ev->bcn_info[i];
  1418. ath10k_dbg(ar, ATH10K_DBG_MGMT,
  1419. "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
  1420. i,
  1421. __le32_to_cpu(bcn_info->tim_info.tim_len),
  1422. __le32_to_cpu(bcn_info->tim_info.tim_mcast),
  1423. __le32_to_cpu(bcn_info->tim_info.tim_changed),
  1424. __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
  1425. __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
  1426. __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
  1427. __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
  1428. __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));
  1429. arvif = ath10k_get_arvif(ar, vdev_id);
  1430. if (arvif == NULL) {
  1431. ath10k_warn(ar, "no vif for vdev_id %d found\n",
  1432. vdev_id);
  1433. continue;
  1434. }
1435. /* There are no completions for beacons, so wait for the next SWBA
1436. * before telling mac80211 to decrement the CSA counter.
1437. *
1438. * Once the CSA countdown has completed, stop sending beacons until
1439. * the actual channel switch is done */
  1440. if (arvif->vif->csa_active &&
  1441. ieee80211_csa_is_complete(arvif->vif)) {
  1442. ieee80211_csa_finish(arvif->vif);
  1443. continue;
  1444. }
  1445. bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
  1446. if (!bcn) {
  1447. ath10k_warn(ar, "could not get mac80211 beacon\n");
  1448. continue;
  1449. }
  1450. ath10k_tx_h_seq_no(arvif->vif, bcn);
  1451. ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
  1452. ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
  1453. spin_lock_bh(&ar->data_lock);
  1454. if (arvif->beacon) {
  1455. if (!arvif->beacon_sent)
  1456. ath10k_warn(ar, "SWBA overrun on vdev %d\n",
  1457. arvif->vdev_id);
  1458. dma_unmap_single(arvif->ar->dev,
  1459. ATH10K_SKB_CB(arvif->beacon)->paddr,
  1460. arvif->beacon->len, DMA_TO_DEVICE);
  1461. dev_kfree_skb_any(arvif->beacon);
  1462. arvif->beacon = NULL;
  1463. }
  1464. ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
  1465. bcn->data, bcn->len,
  1466. DMA_TO_DEVICE);
  1467. ret = dma_mapping_error(arvif->ar->dev,
  1468. ATH10K_SKB_CB(bcn)->paddr);
  1469. if (ret) {
  1470. ath10k_warn(ar, "failed to map beacon: %d\n", ret);
  1471. dev_kfree_skb_any(bcn);
  1472. goto skip;
  1473. }
  1474. arvif->beacon = bcn;
  1475. arvif->beacon_sent = false;
  1476. ath10k_wmi_tx_beacon_nowait(arvif);
  1477. skip:
  1478. spin_unlock_bh(&ar->data_lock);
  1479. }
  1480. }
  1481. static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
  1482. struct sk_buff *skb)
  1483. {
  1484. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
  1485. }
  1486. static void ath10k_dfs_radar_report(struct ath10k *ar,
  1487. struct wmi_single_phyerr_rx_event *event,
  1488. struct phyerr_radar_report *rr,
  1489. u64 tsf)
  1490. {
  1491. u32 reg0, reg1, tsf32l;
  1492. struct pulse_event pe;
  1493. u64 tsf64;
  1494. u8 rssi, width;
  1495. reg0 = __le32_to_cpu(rr->reg0);
  1496. reg1 = __le32_to_cpu(rr->reg1);
  1497. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  1498. "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
  1499. MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
  1500. MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
  1501. MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
  1502. MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
  1503. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
1504. "wmi phyerr radar report pulse_delta_peak %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
  1505. MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
  1506. MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
  1507. MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
  1508. MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
  1509. MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
  1510. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  1511. "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
  1512. MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
  1513. MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
  1514. if (!ar->dfs_detector)
  1515. return;
  1516. /* report event to DFS pattern detector */
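/* the per-pulse header carries only the low 32 bits of the TSF; splice
 * them into the upper 32 bits taken from the combined phyerr event (the
 * tsf argument) */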
  1517. tsf32l = __le32_to_cpu(event->hdr.tsf_timestamp);
  1518. tsf64 = tsf & (~0xFFFFFFFFULL);
  1519. tsf64 |= tsf32l;
  1520. width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
  1521. rssi = event->hdr.rssi_combined;
1522. /* the hardware stores this as an 8-bit signed value;
1523. * clamp it to zero if it is negative
  1524. */
  1525. if (rssi & 0x80)
  1526. rssi = 0;
  1527. pe.ts = tsf64;
  1528. pe.freq = ar->hw->conf.chandef.chan->center_freq;
  1529. pe.width = width;
  1530. pe.rssi = rssi;
  1531. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  1532. "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
  1533. pe.freq, pe.width, pe.rssi, pe.ts);
  1534. ATH10K_DFS_STAT_INC(ar, pulses_detected);
  1535. if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
  1536. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  1537. "dfs no pulse pattern detected, yet\n");
  1538. return;
  1539. }
  1540. ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
  1541. ATH10K_DFS_STAT_INC(ar, radar_detected);
1542. /* Radar event reporting can be suppressed via the
1543. * debugfs file dfs_block_radar_events */
  1544. if (ar->dfs_block_radar_events) {
  1545. ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
  1546. return;
  1547. }
  1548. ieee80211_radar_detected(ar->hw);
  1549. }
  1550. static int ath10k_dfs_fft_report(struct ath10k *ar,
  1551. struct wmi_single_phyerr_rx_event *event,
  1552. struct phyerr_fft_report *fftr,
  1553. u64 tsf)
  1554. {
  1555. u32 reg0, reg1;
  1556. u8 rssi, peak_mag;
  1557. reg0 = __le32_to_cpu(fftr->reg0);
  1558. reg1 = __le32_to_cpu(fftr->reg1);
  1559. rssi = event->hdr.rssi_combined;
  1560. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  1561. "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
  1562. MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
  1563. MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
  1564. MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
  1565. MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
  1566. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  1567. "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
  1568. MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
  1569. MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
  1570. MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
  1571. MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
  1572. peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
  1573. /* false event detection */
  1574. if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
  1575. peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
  1576. ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
  1577. ATH10K_DFS_STAT_INC(ar, pulses_discarded);
  1578. return -EINVAL;
  1579. }
  1580. return 0;
  1581. }
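/* Walk the TLV list attached to a radar phyerr: radar pulse summaries are
 * fed to the DFS pattern detector, while search FFT reports are used to
 * discard likely false pulses.
 */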
  1582. static void ath10k_wmi_event_dfs(struct ath10k *ar,
  1583. struct wmi_single_phyerr_rx_event *event,
  1584. u64 tsf)
  1585. {
  1586. int buf_len, tlv_len, res, i = 0;
  1587. struct phyerr_tlv *tlv;
  1588. struct phyerr_radar_report *rr;
  1589. struct phyerr_fft_report *fftr;
  1590. u8 *tlv_buf;
  1591. buf_len = __le32_to_cpu(event->hdr.buf_len);
  1592. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  1593. "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
  1594. event->hdr.phy_err_code, event->hdr.rssi_combined,
  1595. __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len);
  1596. /* Skip event if DFS disabled */
  1597. if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
  1598. return;
  1599. ATH10K_DFS_STAT_INC(ar, pulses_total);
  1600. while (i < buf_len) {
  1601. if (i + sizeof(*tlv) > buf_len) {
  1602. ath10k_warn(ar, "too short buf for tlv header (%d)\n",
  1603. i);
  1604. return;
  1605. }
  1606. tlv = (struct phyerr_tlv *)&event->bufp[i];
  1607. tlv_len = __le16_to_cpu(tlv->len);
  1608. tlv_buf = &event->bufp[i + sizeof(*tlv)];
  1609. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  1610. "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
  1611. tlv_len, tlv->tag, tlv->sig);
  1612. switch (tlv->tag) {
  1613. case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
  1614. if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
  1615. ath10k_warn(ar, "too short radar pulse summary (%d)\n",
  1616. i);
  1617. return;
  1618. }
  1619. rr = (struct phyerr_radar_report *)tlv_buf;
  1620. ath10k_dfs_radar_report(ar, event, rr, tsf);
  1621. break;
  1622. case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
  1623. if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
  1624. ath10k_warn(ar, "too short fft report (%d)\n",
  1625. i);
  1626. return;
  1627. }
  1628. fftr = (struct phyerr_fft_report *)tlv_buf;
  1629. res = ath10k_dfs_fft_report(ar, event, fftr, tsf);
  1630. if (res)
  1631. return;
  1632. break;
  1633. }
  1634. i += sizeof(*tlv) + tlv_len;
  1635. }
  1636. }
  1637. static void
  1638. ath10k_wmi_event_spectral_scan(struct ath10k *ar,
  1639. struct wmi_single_phyerr_rx_event *event,
  1640. u64 tsf)
  1641. {
  1642. int buf_len, tlv_len, res, i = 0;
  1643. struct phyerr_tlv *tlv;
  1644. u8 *tlv_buf;
  1645. struct phyerr_fft_report *fftr;
  1646. size_t fftr_len;
  1647. buf_len = __le32_to_cpu(event->hdr.buf_len);
  1648. while (i < buf_len) {
  1649. if (i + sizeof(*tlv) > buf_len) {
  1650. ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
  1651. i);
  1652. return;
  1653. }
  1654. tlv = (struct phyerr_tlv *)&event->bufp[i];
  1655. tlv_len = __le16_to_cpu(tlv->len);
  1656. tlv_buf = &event->bufp[i + sizeof(*tlv)];
  1657. if (i + sizeof(*tlv) + tlv_len > buf_len) {
  1658. ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
  1659. i);
  1660. return;
  1661. }
  1662. switch (tlv->tag) {
  1663. case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
  1664. if (sizeof(*fftr) > tlv_len) {
  1665. ath10k_warn(ar, "failed to parse fft report at byte %d\n",
  1666. i);
  1667. return;
  1668. }
  1669. fftr_len = tlv_len - sizeof(*fftr);
  1670. fftr = (struct phyerr_fft_report *)tlv_buf;
  1671. res = ath10k_spectral_process_fft(ar, event,
  1672. fftr, fftr_len,
  1673. tsf);
  1674. if (res < 0) {
  1675. ath10k_warn(ar, "failed to process fft report: %d\n",
  1676. res);
  1677. return;
  1678. }
  1679. break;
  1680. }
  1681. i += sizeof(*tlv) + tlv_len;
  1682. }
  1683. }
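/* A WMI phyerr event carries a combined header followed by 'count'
 * back-to-back per-pulse events, each with its own variable-length buffer;
 * dispatch each one to the DFS and/or spectral scan parsers based on its
 * phy_err_code.
 */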
  1684. static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
  1685. {
  1686. struct wmi_comb_phyerr_rx_event *comb_event;
  1687. struct wmi_single_phyerr_rx_event *event;
  1688. u32 count, i, buf_len, phy_err_code;
  1689. u64 tsf;
  1690. int left_len = skb->len;
  1691. ATH10K_DFS_STAT_INC(ar, phy_errors);
  1692. /* Check if combined event available */
  1693. if (left_len < sizeof(*comb_event)) {
  1694. ath10k_warn(ar, "wmi phyerr combined event wrong len\n");
  1695. return;
  1696. }
  1697. left_len -= sizeof(*comb_event);
  1698. /* Check number of included events */
  1699. comb_event = (struct wmi_comb_phyerr_rx_event *)skb->data;
  1700. count = __le32_to_cpu(comb_event->hdr.num_phyerr_events);
  1701. tsf = __le32_to_cpu(comb_event->hdr.tsf_u32);
  1702. tsf <<= 32;
  1703. tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32);
  1704. ath10k_dbg(ar, ATH10K_DBG_WMI,
  1705. "wmi event phyerr count %d tsf64 0x%llX\n",
  1706. count, tsf);
  1707. event = (struct wmi_single_phyerr_rx_event *)comb_event->bufp;
  1708. for (i = 0; i < count; i++) {
  1709. /* Check if we can read event header */
  1710. if (left_len < sizeof(*event)) {
  1711. ath10k_warn(ar, "single event (%d) wrong head len\n",
  1712. i);
  1713. return;
  1714. }
  1715. left_len -= sizeof(*event);
  1716. buf_len = __le32_to_cpu(event->hdr.buf_len);
  1717. phy_err_code = event->hdr.phy_err_code;
  1718. if (left_len < buf_len) {
  1719. ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
  1720. return;
  1721. }
  1722. left_len -= buf_len;
  1723. switch (phy_err_code) {
  1724. case PHY_ERROR_RADAR:
  1725. ath10k_wmi_event_dfs(ar, event, tsf);
  1726. break;
  1727. case PHY_ERROR_SPECTRAL_SCAN:
  1728. ath10k_wmi_event_spectral_scan(ar, event, tsf);
  1729. break;
  1730. case PHY_ERROR_FALSE_RADAR_EXT:
  1731. ath10k_wmi_event_dfs(ar, event, tsf);
  1732. ath10k_wmi_event_spectral_scan(ar, event, tsf);
  1733. break;
  1734. default:
  1735. break;
  1736. }
1737. /* advance by bytes, not by struct-sized elements */
1737. event = (void *)event + sizeof(*event) + buf_len;
  1738. }
  1739. }
  1740. static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
  1741. {
  1742. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
  1743. }
  1744. static void ath10k_wmi_event_profile_match(struct ath10k *ar,
  1745. struct sk_buff *skb)
  1746. {
  1747. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
  1748. }
  1749. static void ath10k_wmi_event_debug_print(struct ath10k *ar,
  1750. struct sk_buff *skb)
  1751. {
  1752. char buf[101], c;
  1753. int i;
  1754. for (i = 0; i < sizeof(buf) - 1; i++) {
  1755. if (i >= skb->len)
  1756. break;
  1757. c = skb->data[i];
  1758. if (c == '\0')
  1759. break;
  1760. if (isascii(c) && isprint(c))
  1761. buf[i] = c;
  1762. else
  1763. buf[i] = '.';
  1764. }
  1765. if (i == sizeof(buf) - 1)
  1766. ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
  1767. /* for some reason the debug prints end with \n, remove that */
  1768. if (skb->data[i - 1] == '\n')
  1769. i--;
  1770. /* the last byte is always reserved for the null character */
  1771. buf[i] = '\0';
  1772. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf);
  1773. }
  1774. static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
  1775. {
  1776. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
  1777. }
  1778. static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
  1779. struct sk_buff *skb)
  1780. {
  1781. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
  1782. }
  1783. static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
  1784. struct sk_buff *skb)
  1785. {
  1786. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
  1787. }
  1788. static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
  1789. struct sk_buff *skb)
  1790. {
  1791. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
  1792. }
  1793. static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
  1794. struct sk_buff *skb)
  1795. {
  1796. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
  1797. }
  1798. static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
  1799. struct sk_buff *skb)
  1800. {
  1801. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
  1802. }
  1803. static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
  1804. struct sk_buff *skb)
  1805. {
  1806. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
  1807. }
  1808. static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
  1809. struct sk_buff *skb)
  1810. {
  1811. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
  1812. }
  1813. static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
  1814. struct sk_buff *skb)
  1815. {
  1816. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
  1817. }
  1818. static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
  1819. struct sk_buff *skb)
  1820. {
  1821. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
  1822. }
  1823. static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
  1824. struct sk_buff *skb)
  1825. {
  1826. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
  1827. }
  1828. static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
  1829. struct sk_buff *skb)
  1830. {
  1831. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
  1832. }
  1833. static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
  1834. struct sk_buff *skb)
  1835. {
  1836. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
  1837. }
  1838. static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
  1839. struct sk_buff *skb)
  1840. {
  1841. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
  1842. }
  1843. static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
  1844. struct sk_buff *skb)
  1845. {
  1846. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
  1847. }
  1848. static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
  1849. struct sk_buff *skb)
  1850. {
  1851. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
  1852. }
  1853. static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
  1854. struct sk_buff *skb)
  1855. {
  1856. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
  1857. }
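/* Allocate one DMA-coherent host memory chunk on behalf of the firmware and
 * record it in ar->wmi.mem_chunks[]; req_id identifies which firmware memory
 * request the chunk satisfies.
 */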
  1858. static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
  1859. u32 num_units, u32 unit_len)
  1860. {
  1861. dma_addr_t paddr;
  1862. u32 pool_size;
  1863. int idx = ar->wmi.num_mem_chunks;
  1864. pool_size = num_units * round_up(unit_len, 4);
  1865. if (!pool_size)
  1866. return -EINVAL;
  1867. ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
  1868. pool_size,
  1869. &paddr,
  1870. GFP_ATOMIC);
  1871. if (!ar->wmi.mem_chunks[idx].vaddr) {
  1872. ath10k_warn(ar, "failed to allocate memory chunk\n");
  1873. return -ENOMEM;
  1874. }
  1875. memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
  1876. ar->wmi.mem_chunks[idx].paddr = paddr;
  1877. ar->wmi.mem_chunks[idx].len = pool_size;
  1878. ar->wmi.mem_chunks[idx].req_id = req_id;
  1879. ar->wmi.num_mem_chunks++;
  1880. return 0;
  1881. }
  1882. static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
  1883. struct sk_buff *skb)
  1884. {
  1885. struct wmi_service_ready_event *ev = (void *)skb->data;
  1886. DECLARE_BITMAP(svc_bmap, WMI_SERVICE_MAX) = {};
  1887. if (skb->len < sizeof(*ev)) {
  1888. ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
  1889. skb->len, sizeof(*ev));
  1890. return;
  1891. }
  1892. ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
  1893. ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
  1894. ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
  1895. ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
  1896. ar->fw_version_major =
  1897. (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
  1898. ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
  1899. ar->fw_version_release =
  1900. (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
  1901. ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
  1902. ar->phy_capability = __le32_to_cpu(ev->phy_capability);
  1903. ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
  1904. /* only manually set fw features when not using FW IE format */
  1905. if (ar->fw_api == 1 && ar->fw_version_build > 636)
  1906. set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
  1907. if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
  1908. ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
  1909. ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
  1910. ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
  1911. }
  1912. ar->ath_common.regulatory.current_rd =
  1913. __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
  1914. wmi_main_svc_map(ev->wmi_service_bitmap, svc_bmap);
  1915. ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
  1916. ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
  1917. ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap));
  1918. if (strlen(ar->hw->wiphy->fw_version) == 0) {
  1919. snprintf(ar->hw->wiphy->fw_version,
  1920. sizeof(ar->hw->wiphy->fw_version),
  1921. "%u.%u.%u.%u",
  1922. ar->fw_version_major,
  1923. ar->fw_version_minor,
  1924. ar->fw_version_release,
  1925. ar->fw_version_build);
  1926. }
1927. /* FIXME: it would probably be better to support this */
  1928. if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
  1929. ath10k_warn(ar, "target requested %d memory chunks; ignoring\n",
  1930. __le32_to_cpu(ev->num_mem_reqs));
  1931. }
  1932. ath10k_dbg(ar, ATH10K_DBG_WMI,
  1933. "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
  1934. __le32_to_cpu(ev->sw_version),
  1935. __le32_to_cpu(ev->sw_version_1),
  1936. __le32_to_cpu(ev->abi_version),
  1937. __le32_to_cpu(ev->phy_capability),
  1938. __le32_to_cpu(ev->ht_cap_info),
  1939. __le32_to_cpu(ev->vht_cap_info),
  1940. __le32_to_cpu(ev->vht_supp_mcs),
  1941. __le32_to_cpu(ev->sys_cap_info),
  1942. __le32_to_cpu(ev->num_mem_reqs),
  1943. __le32_to_cpu(ev->num_rf_chains));
  1944. complete(&ar->wmi.service_ready);
  1945. }
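/* 10.x firmware variant of the service ready handler. Unlike the main
 * variant above, it honours the firmware's host memory requests by
 * allocating the requested chunks via ath10k_wmi_alloc_host_mem().
 */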
  1946. static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
  1947. struct sk_buff *skb)
  1948. {
  1949. u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
  1950. int ret;
  1951. struct wmi_service_ready_event_10x *ev = (void *)skb->data;
  1952. DECLARE_BITMAP(svc_bmap, WMI_SERVICE_MAX) = {};
  1953. if (skb->len < sizeof(*ev)) {
  1954. ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
  1955. skb->len, sizeof(*ev));
  1956. return;
  1957. }
  1958. ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
  1959. ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
  1960. ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
  1961. ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
  1962. ar->fw_version_major =
  1963. (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
  1964. ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
  1965. ar->phy_capability = __le32_to_cpu(ev->phy_capability);
  1966. ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
  1967. if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
  1968. ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
  1969. ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
  1970. ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
  1971. }
  1972. ar->ath_common.regulatory.current_rd =
  1973. __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
  1974. wmi_10x_svc_map(ev->wmi_service_bitmap, svc_bmap);
  1975. ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
  1976. ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
  1977. ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap));
  1978. if (strlen(ar->hw->wiphy->fw_version) == 0) {
  1979. snprintf(ar->hw->wiphy->fw_version,
  1980. sizeof(ar->hw->wiphy->fw_version),
  1981. "%u.%u",
  1982. ar->fw_version_major,
  1983. ar->fw_version_minor);
  1984. }
  1985. num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
  1986. if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
  1987. ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
  1988. num_mem_reqs);
  1989. return;
  1990. }
  1991. if (!num_mem_reqs)
  1992. goto exit;
  1993. ath10k_dbg(ar, ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
  1994. num_mem_reqs);
  1995. for (i = 0; i < num_mem_reqs; ++i) {
  1996. req_id = __le32_to_cpu(ev->mem_reqs[i].req_id);
  1997. num_units = __le32_to_cpu(ev->mem_reqs[i].num_units);
  1998. unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size);
  1999. num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info);
  2000. if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
2001. /* the number of units to allocate is the number of
2002. * peers, plus 1 extra for the target's self peer */
2003. /* this must stay in sync between host and target,
2004. * otherwise the two can disagree on the pool size */
  2005. num_units = TARGET_10X_NUM_PEERS + 1;
  2006. else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
  2007. num_units = TARGET_10X_NUM_VDEVS + 1;
  2008. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2009. "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
  2010. req_id,
  2011. __le32_to_cpu(ev->mem_reqs[i].num_units),
  2012. num_unit_info,
  2013. unit_size,
  2014. num_units);
  2015. ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
  2016. unit_size);
  2017. if (ret)
  2018. return;
  2019. }
  2020. exit:
  2021. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2022. "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
  2023. __le32_to_cpu(ev->sw_version),
  2024. __le32_to_cpu(ev->abi_version),
  2025. __le32_to_cpu(ev->phy_capability),
  2026. __le32_to_cpu(ev->ht_cap_info),
  2027. __le32_to_cpu(ev->vht_cap_info),
  2028. __le32_to_cpu(ev->vht_supp_mcs),
  2029. __le32_to_cpu(ev->sys_cap_info),
  2030. __le32_to_cpu(ev->num_mem_reqs),
  2031. __le32_to_cpu(ev->num_rf_chains));
  2032. complete(&ar->wmi.service_ready);
  2033. }
  2034. static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
  2035. {
  2036. struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
  2037. if (WARN_ON(skb->len < sizeof(*ev)))
  2038. return -EINVAL;
  2039. ether_addr_copy(ar->mac_addr, ev->mac_addr.addr);
  2040. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2041. "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n",
  2042. __le32_to_cpu(ev->sw_version),
  2043. __le32_to_cpu(ev->abi_version),
  2044. ev->mac_addr.addr,
  2045. __le32_to_cpu(ev->status), skb->len, sizeof(*ev));
  2046. complete(&ar->wmi.unified_ready);
  2047. return 0;
  2048. }
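/* Top-level WMI event dispatcher for main firmware. Note that the mgmt rx
 * handler takes ownership of the skb; every other case falls through to the
 * dev_kfree_skb() at the end.
 */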
  2049. static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
  2050. {
  2051. struct wmi_cmd_hdr *cmd_hdr;
  2052. enum wmi_event_id id;
  2053. cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
  2054. id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
  2055. if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
  2056. return;
  2057. trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
  2058. switch (id) {
  2059. case WMI_MGMT_RX_EVENTID:
  2060. ath10k_wmi_event_mgmt_rx(ar, skb);
  2061. /* mgmt_rx() owns the skb now! */
  2062. return;
  2063. case WMI_SCAN_EVENTID:
  2064. ath10k_wmi_event_scan(ar, skb);
  2065. break;
  2066. case WMI_CHAN_INFO_EVENTID:
  2067. ath10k_wmi_event_chan_info(ar, skb);
  2068. break;
  2069. case WMI_ECHO_EVENTID:
  2070. ath10k_wmi_event_echo(ar, skb);
  2071. break;
  2072. case WMI_DEBUG_MESG_EVENTID:
  2073. ath10k_wmi_event_debug_mesg(ar, skb);
  2074. break;
  2075. case WMI_UPDATE_STATS_EVENTID:
  2076. ath10k_wmi_event_update_stats(ar, skb);
  2077. break;
  2078. case WMI_VDEV_START_RESP_EVENTID:
  2079. ath10k_wmi_event_vdev_start_resp(ar, skb);
  2080. break;
  2081. case WMI_VDEV_STOPPED_EVENTID:
  2082. ath10k_wmi_event_vdev_stopped(ar, skb);
  2083. break;
  2084. case WMI_PEER_STA_KICKOUT_EVENTID:
  2085. ath10k_wmi_event_peer_sta_kickout(ar, skb);
  2086. break;
  2087. case WMI_HOST_SWBA_EVENTID:
  2088. ath10k_wmi_event_host_swba(ar, skb);
  2089. break;
  2090. case WMI_TBTTOFFSET_UPDATE_EVENTID:
  2091. ath10k_wmi_event_tbttoffset_update(ar, skb);
  2092. break;
  2093. case WMI_PHYERR_EVENTID:
  2094. ath10k_wmi_event_phyerr(ar, skb);
  2095. break;
  2096. case WMI_ROAM_EVENTID:
  2097. ath10k_wmi_event_roam(ar, skb);
  2098. break;
  2099. case WMI_PROFILE_MATCH:
  2100. ath10k_wmi_event_profile_match(ar, skb);
  2101. break;
  2102. case WMI_DEBUG_PRINT_EVENTID:
  2103. ath10k_wmi_event_debug_print(ar, skb);
  2104. break;
  2105. case WMI_PDEV_QVIT_EVENTID:
  2106. ath10k_wmi_event_pdev_qvit(ar, skb);
  2107. break;
  2108. case WMI_WLAN_PROFILE_DATA_EVENTID:
  2109. ath10k_wmi_event_wlan_profile_data(ar, skb);
  2110. break;
  2111. case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
  2112. ath10k_wmi_event_rtt_measurement_report(ar, skb);
  2113. break;
  2114. case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
  2115. ath10k_wmi_event_tsf_measurement_report(ar, skb);
  2116. break;
  2117. case WMI_RTT_ERROR_REPORT_EVENTID:
  2118. ath10k_wmi_event_rtt_error_report(ar, skb);
  2119. break;
  2120. case WMI_WOW_WAKEUP_HOST_EVENTID:
  2121. ath10k_wmi_event_wow_wakeup_host(ar, skb);
  2122. break;
  2123. case WMI_DCS_INTERFERENCE_EVENTID:
  2124. ath10k_wmi_event_dcs_interference(ar, skb);
  2125. break;
  2126. case WMI_PDEV_TPC_CONFIG_EVENTID:
  2127. ath10k_wmi_event_pdev_tpc_config(ar, skb);
  2128. break;
  2129. case WMI_PDEV_FTM_INTG_EVENTID:
  2130. ath10k_wmi_event_pdev_ftm_intg(ar, skb);
  2131. break;
  2132. case WMI_GTK_OFFLOAD_STATUS_EVENTID:
  2133. ath10k_wmi_event_gtk_offload_status(ar, skb);
  2134. break;
  2135. case WMI_GTK_REKEY_FAIL_EVENTID:
  2136. ath10k_wmi_event_gtk_rekey_fail(ar, skb);
  2137. break;
  2138. case WMI_TX_DELBA_COMPLETE_EVENTID:
  2139. ath10k_wmi_event_delba_complete(ar, skb);
  2140. break;
  2141. case WMI_TX_ADDBA_COMPLETE_EVENTID:
  2142. ath10k_wmi_event_addba_complete(ar, skb);
  2143. break;
  2144. case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
  2145. ath10k_wmi_event_vdev_install_key_complete(ar, skb);
  2146. break;
  2147. case WMI_SERVICE_READY_EVENTID:
  2148. ath10k_wmi_service_ready_event_rx(ar, skb);
  2149. break;
  2150. case WMI_READY_EVENTID:
  2151. ath10k_wmi_ready_event_rx(ar, skb);
  2152. break;
  2153. default:
  2154. ath10k_warn(ar, "Unknown eventid: %d\n", id);
  2155. break;
  2156. }
  2157. dev_kfree_skb(skb);
  2158. }
  2159. static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
  2160. {
  2161. struct wmi_cmd_hdr *cmd_hdr;
  2162. enum wmi_10x_event_id id;
  2163. bool consumed;
  2164. cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
  2165. id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
  2166. if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
  2167. return;
  2168. trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
  2169. consumed = ath10k_tm_event_wmi(ar, id, skb);
2170. /* The ready event must be handled normally even in UTF mode so that we
2171. * know the UTF firmware has booted; all other WMI events are simply
2172. * passed through to testmode.
  2173. */
  2174. if (consumed && id != WMI_10X_READY_EVENTID) {
  2175. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2176. "wmi testmode consumed 0x%x\n", id);
  2177. goto out;
  2178. }
  2179. switch (id) {
  2180. case WMI_10X_MGMT_RX_EVENTID:
  2181. ath10k_wmi_event_mgmt_rx(ar, skb);
  2182. /* mgmt_rx() owns the skb now! */
  2183. return;
  2184. case WMI_10X_SCAN_EVENTID:
  2185. ath10k_wmi_event_scan(ar, skb);
  2186. break;
  2187. case WMI_10X_CHAN_INFO_EVENTID:
  2188. ath10k_wmi_event_chan_info(ar, skb);
  2189. break;
  2190. case WMI_10X_ECHO_EVENTID:
  2191. ath10k_wmi_event_echo(ar, skb);
  2192. break;
  2193. case WMI_10X_DEBUG_MESG_EVENTID:
  2194. ath10k_wmi_event_debug_mesg(ar, skb);
  2195. break;
  2196. case WMI_10X_UPDATE_STATS_EVENTID:
  2197. ath10k_wmi_event_update_stats(ar, skb);
  2198. break;
  2199. case WMI_10X_VDEV_START_RESP_EVENTID:
  2200. ath10k_wmi_event_vdev_start_resp(ar, skb);
  2201. break;
  2202. case WMI_10X_VDEV_STOPPED_EVENTID:
  2203. ath10k_wmi_event_vdev_stopped(ar, skb);
  2204. break;
  2205. case WMI_10X_PEER_STA_KICKOUT_EVENTID:
  2206. ath10k_wmi_event_peer_sta_kickout(ar, skb);
  2207. break;
  2208. case WMI_10X_HOST_SWBA_EVENTID:
  2209. ath10k_wmi_event_host_swba(ar, skb);
  2210. break;
  2211. case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
  2212. ath10k_wmi_event_tbttoffset_update(ar, skb);
  2213. break;
  2214. case WMI_10X_PHYERR_EVENTID:
  2215. ath10k_wmi_event_phyerr(ar, skb);
  2216. break;
  2217. case WMI_10X_ROAM_EVENTID:
  2218. ath10k_wmi_event_roam(ar, skb);
  2219. break;
  2220. case WMI_10X_PROFILE_MATCH:
  2221. ath10k_wmi_event_profile_match(ar, skb);
  2222. break;
  2223. case WMI_10X_DEBUG_PRINT_EVENTID:
  2224. ath10k_wmi_event_debug_print(ar, skb);
  2225. break;
  2226. case WMI_10X_PDEV_QVIT_EVENTID:
  2227. ath10k_wmi_event_pdev_qvit(ar, skb);
  2228. break;
  2229. case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
  2230. ath10k_wmi_event_wlan_profile_data(ar, skb);
  2231. break;
  2232. case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
  2233. ath10k_wmi_event_rtt_measurement_report(ar, skb);
  2234. break;
  2235. case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
  2236. ath10k_wmi_event_tsf_measurement_report(ar, skb);
  2237. break;
  2238. case WMI_10X_RTT_ERROR_REPORT_EVENTID:
  2239. ath10k_wmi_event_rtt_error_report(ar, skb);
  2240. break;
  2241. case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
  2242. ath10k_wmi_event_wow_wakeup_host(ar, skb);
  2243. break;
  2244. case WMI_10X_DCS_INTERFERENCE_EVENTID:
  2245. ath10k_wmi_event_dcs_interference(ar, skb);
  2246. break;
  2247. case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
  2248. ath10k_wmi_event_pdev_tpc_config(ar, skb);
  2249. break;
  2250. case WMI_10X_INST_RSSI_STATS_EVENTID:
  2251. ath10k_wmi_event_inst_rssi_stats(ar, skb);
  2252. break;
  2253. case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
  2254. ath10k_wmi_event_vdev_standby_req(ar, skb);
  2255. break;
  2256. case WMI_10X_VDEV_RESUME_REQ_EVENTID:
  2257. ath10k_wmi_event_vdev_resume_req(ar, skb);
  2258. break;
  2259. case WMI_10X_SERVICE_READY_EVENTID:
  2260. ath10k_wmi_10x_service_ready_event_rx(ar, skb);
  2261. break;
  2262. case WMI_10X_READY_EVENTID:
  2263. ath10k_wmi_ready_event_rx(ar, skb);
  2264. break;
  2265. case WMI_10X_PDEV_UTF_EVENTID:
  2266. /* ignore utf events */
  2267. break;
  2268. default:
  2269. ath10k_warn(ar, "Unknown eventid: %d\n", id);
  2270. break;
  2271. }
  2272. out:
  2273. dev_kfree_skb(skb);
  2274. }
  2275. static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb)
  2276. {
  2277. struct wmi_cmd_hdr *cmd_hdr;
  2278. enum wmi_10_2_event_id id;
  2279. cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
  2280. id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
  2281. if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
  2282. return;
  2283. trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
  2284. switch (id) {
  2285. case WMI_10_2_MGMT_RX_EVENTID:
  2286. ath10k_wmi_event_mgmt_rx(ar, skb);
  2287. /* mgmt_rx() owns the skb now! */
  2288. return;
  2289. case WMI_10_2_SCAN_EVENTID:
  2290. ath10k_wmi_event_scan(ar, skb);
  2291. break;
  2292. case WMI_10_2_CHAN_INFO_EVENTID:
  2293. ath10k_wmi_event_chan_info(ar, skb);
  2294. break;
  2295. case WMI_10_2_ECHO_EVENTID:
  2296. ath10k_wmi_event_echo(ar, skb);
  2297. break;
  2298. case WMI_10_2_DEBUG_MESG_EVENTID:
  2299. ath10k_wmi_event_debug_mesg(ar, skb);
  2300. break;
  2301. case WMI_10_2_UPDATE_STATS_EVENTID:
  2302. ath10k_wmi_event_update_stats(ar, skb);
  2303. break;
  2304. case WMI_10_2_VDEV_START_RESP_EVENTID:
  2305. ath10k_wmi_event_vdev_start_resp(ar, skb);
  2306. break;
  2307. case WMI_10_2_VDEV_STOPPED_EVENTID:
  2308. ath10k_wmi_event_vdev_stopped(ar, skb);
  2309. break;
  2310. case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
  2311. ath10k_wmi_event_peer_sta_kickout(ar, skb);
  2312. break;
  2313. case WMI_10_2_HOST_SWBA_EVENTID:
  2314. ath10k_wmi_event_host_swba(ar, skb);
  2315. break;
  2316. case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
  2317. ath10k_wmi_event_tbttoffset_update(ar, skb);
  2318. break;
  2319. case WMI_10_2_PHYERR_EVENTID:
  2320. ath10k_wmi_event_phyerr(ar, skb);
  2321. break;
  2322. case WMI_10_2_ROAM_EVENTID:
  2323. ath10k_wmi_event_roam(ar, skb);
  2324. break;
  2325. case WMI_10_2_PROFILE_MATCH:
  2326. ath10k_wmi_event_profile_match(ar, skb);
  2327. break;
  2328. case WMI_10_2_DEBUG_PRINT_EVENTID:
  2329. ath10k_wmi_event_debug_print(ar, skb);
  2330. break;
  2331. case WMI_10_2_PDEV_QVIT_EVENTID:
  2332. ath10k_wmi_event_pdev_qvit(ar, skb);
  2333. break;
  2334. case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
  2335. ath10k_wmi_event_wlan_profile_data(ar, skb);
  2336. break;
  2337. case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
  2338. ath10k_wmi_event_rtt_measurement_report(ar, skb);
  2339. break;
  2340. case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
  2341. ath10k_wmi_event_tsf_measurement_report(ar, skb);
  2342. break;
  2343. case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
  2344. ath10k_wmi_event_rtt_error_report(ar, skb);
  2345. break;
  2346. case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
  2347. ath10k_wmi_event_wow_wakeup_host(ar, skb);
  2348. break;
  2349. case WMI_10_2_DCS_INTERFERENCE_EVENTID:
  2350. ath10k_wmi_event_dcs_interference(ar, skb);
  2351. break;
  2352. case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
  2353. ath10k_wmi_event_pdev_tpc_config(ar, skb);
  2354. break;
  2355. case WMI_10_2_INST_RSSI_STATS_EVENTID:
  2356. ath10k_wmi_event_inst_rssi_stats(ar, skb);
  2357. break;
  2358. case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
  2359. ath10k_wmi_event_vdev_standby_req(ar, skb);
  2360. break;
  2361. case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
  2362. ath10k_wmi_event_vdev_resume_req(ar, skb);
  2363. break;
  2364. case WMI_10_2_SERVICE_READY_EVENTID:
  2365. ath10k_wmi_10x_service_ready_event_rx(ar, skb);
  2366. break;
  2367. case WMI_10_2_READY_EVENTID:
  2368. ath10k_wmi_ready_event_rx(ar, skb);
  2369. break;
  2370. case WMI_10_2_RTT_KEEPALIVE_EVENTID:
  2371. case WMI_10_2_GPIO_INPUT_EVENTID:
  2372. case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
  2373. case WMI_10_2_GENERIC_BUFFER_EVENTID:
  2374. case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
  2375. case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
  2376. case WMI_10_2_WDS_PEER_EVENTID:
  2377. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2378. "received event id %d not implemented\n", id);
  2379. break;
  2380. default:
  2381. ath10k_warn(ar, "Unknown eventid: %d\n", id);
  2382. break;
  2383. }
  2384. dev_kfree_skb(skb);
  2385. }
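/* Top level WMI event dispatcher: route the received buffer to the
 * 10.2, 10.x or main event parser depending on the firmware features
 * advertised by the target.
 */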
  2386. static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
  2387. {
  2388. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  2389. if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
  2390. ath10k_wmi_10_2_process_rx(ar, skb);
  2391. else
  2392. ath10k_wmi_10x_process_rx(ar, skb);
  2393. } else {
  2394. ath10k_wmi_main_process_rx(ar, skb);
  2395. }
  2396. }
  2397. /* WMI Initialization functions */
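/* Pick the command and parameter maps matching the running firmware
 * flavour and initialise the completions and waitqueue used while
 * bringing WMI up.
 */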
  2398. int ath10k_wmi_attach(struct ath10k *ar)
  2399. {
  2400. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  2401. if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
  2402. ar->wmi.cmd = &wmi_10_2_cmd_map;
  2403. else
  2404. ar->wmi.cmd = &wmi_10x_cmd_map;
  2405. ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
  2406. ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
  2407. } else {
  2408. ar->wmi.cmd = &wmi_cmd_map;
  2409. ar->wmi.vdev_param = &wmi_vdev_param_map;
  2410. ar->wmi.pdev_param = &wmi_pdev_param_map;
  2411. }
  2412. init_completion(&ar->wmi.service_ready);
  2413. init_completion(&ar->wmi.unified_ready);
  2414. init_waitqueue_head(&ar->wmi.tx_credits_wq);
  2415. return 0;
  2416. }
  2417. void ath10k_wmi_detach(struct ath10k *ar)
  2418. {
  2419. int i;
  2420. /* free the host memory chunks requested by firmware */
  2421. for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
  2422. dma_free_coherent(ar->dev,
  2423. ar->wmi.mem_chunks[i].len,
  2424. ar->wmi.mem_chunks[i].vaddr,
  2425. ar->wmi.mem_chunks[i].paddr);
  2426. }
  2427. ar->wmi.num_mem_chunks = 0;
  2428. }
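/* Connect the WMI control service to HTC and record the endpoint id
 * used for subsequent WMI traffic.
 */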
  2429. int ath10k_wmi_connect(struct ath10k *ar)
  2430. {
  2431. int status;
  2432. struct ath10k_htc_svc_conn_req conn_req;
  2433. struct ath10k_htc_svc_conn_resp conn_resp;
  2434. memset(&conn_req, 0, sizeof(conn_req));
  2435. memset(&conn_resp, 0, sizeof(conn_resp));
  2436. /* these fields are the same for all service endpoints */
  2437. conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
  2438. conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
  2439. conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
  2440. /* connect to control service */
  2441. conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
  2442. status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
  2443. if (status) {
  2444. ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
  2445. status);
  2446. return status;
  2447. }
  2448. ar->wmi.eid = conn_resp.eid;
  2449. return 0;
  2450. }
  2451. static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
  2452. u16 rd2g, u16 rd5g, u16 ctl2g,
  2453. u16 ctl5g)
  2454. {
  2455. struct wmi_pdev_set_regdomain_cmd *cmd;
  2456. struct sk_buff *skb;
  2457. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  2458. if (!skb)
  2459. return -ENOMEM;
  2460. cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
  2461. cmd->reg_domain = __cpu_to_le32(rd);
  2462. cmd->reg_domain_2G = __cpu_to_le32(rd2g);
  2463. cmd->reg_domain_5G = __cpu_to_le32(rd5g);
  2464. cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
  2465. cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
  2466. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2467. "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
  2468. rd, rd2g, rd5g, ctl2g, ctl5g);
  2469. return ath10k_wmi_cmd_send(ar, skb,
  2470. ar->wmi.cmd->pdev_set_regdomain_cmdid);
  2471. }
  2472. static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
  2473. u16 rd2g, u16 rd5g,
  2474. u16 ctl2g, u16 ctl5g,
  2475. enum wmi_dfs_region dfs_reg)
  2476. {
  2477. struct wmi_pdev_set_regdomain_cmd_10x *cmd;
  2478. struct sk_buff *skb;
  2479. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  2480. if (!skb)
  2481. return -ENOMEM;
  2482. cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
  2483. cmd->reg_domain = __cpu_to_le32(rd);
  2484. cmd->reg_domain_2G = __cpu_to_le32(rd2g);
  2485. cmd->reg_domain_5G = __cpu_to_le32(rd5g);
  2486. cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
  2487. cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
  2488. cmd->dfs_domain = __cpu_to_le32(dfs_reg);
  2489. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2490. "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
  2491. rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
  2492. return ath10k_wmi_cmd_send(ar, skb,
  2493. ar->wmi.cmd->pdev_set_regdomain_cmdid);
  2494. }
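/* The 10.x regdomain command additionally carries the DFS region, so
 * dispatch to the variant matching the firmware in use.
 */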
  2495. int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
  2496. u16 rd5g, u16 ctl2g, u16 ctl5g,
  2497. enum wmi_dfs_region dfs_reg)
  2498. {
  2499. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
  2500. return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
  2501. ctl2g, ctl5g, dfs_reg);
  2502. else
  2503. return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
  2504. ctl2g, ctl5g);
  2505. }
  2506. int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
  2507. const struct wmi_channel_arg *arg)
  2508. {
  2509. struct wmi_set_channel_cmd *cmd;
  2510. struct sk_buff *skb;
  2511. u32 ch_flags = 0;
  2512. if (arg->passive)
  2513. return -EINVAL;
  2514. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  2515. if (!skb)
  2516. return -ENOMEM;
  2517. if (arg->chan_radar)
  2518. ch_flags |= WMI_CHAN_FLAG_DFS;
  2519. cmd = (struct wmi_set_channel_cmd *)skb->data;
  2520. cmd->chan.mhz = __cpu_to_le32(arg->freq);
  2521. cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
  2522. cmd->chan.mode = arg->mode;
  2523. cmd->chan.flags |= __cpu_to_le32(ch_flags);
  2524. cmd->chan.min_power = arg->min_power;
  2525. cmd->chan.max_power = arg->max_power;
  2526. cmd->chan.reg_power = arg->max_reg_power;
  2527. cmd->chan.reg_classid = arg->reg_class_id;
  2528. cmd->chan.antenna_max = arg->max_antenna_gain;
  2529. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2530. "wmi set channel mode %d freq %d\n",
  2531. arg->mode, arg->freq);
  2532. return ath10k_wmi_cmd_send(ar, skb,
  2533. ar->wmi.cmd->pdev_set_channel_cmdid);
  2534. }
  2535. int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
  2536. {
  2537. struct wmi_pdev_suspend_cmd *cmd;
  2538. struct sk_buff *skb;
  2539. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  2540. if (!skb)
  2541. return -ENOMEM;
  2542. cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
  2543. cmd->suspend_opt = __cpu_to_le32(suspend_opt);
  2544. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
  2545. }
  2546. int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
  2547. {
  2548. struct sk_buff *skb;
  2549. skb = ath10k_wmi_alloc_skb(ar, 0);
  2550. if (skb == NULL)
  2551. return -ENOMEM;
  2552. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
  2553. }
  2554. int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
  2555. {
  2556. struct wmi_pdev_set_param_cmd *cmd;
  2557. struct sk_buff *skb;
  2558. if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
  2559. ath10k_warn(ar, "pdev param %d not supported by firmware\n",
  2560. id);
  2561. return -EOPNOTSUPP;
  2562. }
  2563. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  2564. if (!skb)
  2565. return -ENOMEM;
  2566. cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
  2567. cmd->param_id = __cpu_to_le32(id);
  2568. cmd->param_value = __cpu_to_le32(value);
  2569. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
  2570. id, value);
  2571. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
  2572. }
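/* Build and send the WMI init command for main firmware: fill the
 * resource config with the TARGET_* defaults and append the host
 * memory chunks requested earlier by the firmware.
 */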
  2573. static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
  2574. {
  2575. struct wmi_init_cmd *cmd;
  2576. struct sk_buff *buf;
  2577. struct wmi_resource_config config = {};
  2578. u32 len, val;
  2579. int i;
  2580. config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
  2581. config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
  2582. config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
  2583. config.num_offload_reorder_bufs =
  2584. __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
  2585. config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
  2586. config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
  2587. config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
  2588. config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
  2589. config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
  2590. config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
  2591. config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
  2592. config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
  2593. config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
  2594. config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);
  2595. config.scan_max_pending_reqs =
  2596. __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
  2597. config.bmiss_offload_max_vdev =
  2598. __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
  2599. config.roam_offload_max_vdev =
  2600. __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
  2601. config.roam_offload_max_ap_profiles =
  2602. __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
  2603. config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
  2604. config.num_mcast_table_elems =
  2605. __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
  2606. config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
  2607. config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
  2608. config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
  2609. config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
  2610. config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
  2611. val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
  2612. config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
  2613. config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
  2614. config.gtk_offload_max_vdev =
  2615. __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
  2616. config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
  2617. config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
  2618. len = sizeof(*cmd) +
  2619. (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
  2620. buf = ath10k_wmi_alloc_skb(ar, len);
  2621. if (!buf)
  2622. return -ENOMEM;
  2623. cmd = (struct wmi_init_cmd *)buf->data;
  2624. if (ar->wmi.num_mem_chunks == 0) {
  2625. cmd->num_host_mem_chunks = 0;
  2626. goto out;
  2627. }
  2628. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
  2629. ar->wmi.num_mem_chunks);
  2630. cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
  2631. for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
  2632. cmd->host_mem_chunks[i].ptr =
  2633. __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
  2634. cmd->host_mem_chunks[i].size =
  2635. __cpu_to_le32(ar->wmi.mem_chunks[i].len);
  2636. cmd->host_mem_chunks[i].req_id =
  2637. __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
  2638. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2639. "wmi chunk %d len %d requested, addr 0x%llx\n",
  2640. i,
  2641. ar->wmi.mem_chunks[i].len,
  2642. (unsigned long long)ar->wmi.mem_chunks[i].paddr);
  2643. }
  2644. out:
  2645. memcpy(&cmd->resource_config, &config, sizeof(config));
  2646. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
  2647. return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
  2648. }
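/* As above, but using the 10.x resource config layout and the
 * TARGET_10X_* defaults.
 */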
  2649. static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
  2650. {
  2651. struct wmi_init_cmd_10x *cmd;
  2652. struct sk_buff *buf;
  2653. struct wmi_resource_config_10x config = {};
  2654. u32 len, val;
  2655. int i;
  2656. config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
  2657. config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
  2658. config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
  2659. config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
  2660. config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
  2661. config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
  2662. config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
  2663. config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
  2664. config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
  2665. config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
  2666. config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
  2667. config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
  2668. config.scan_max_pending_reqs =
  2669. __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
  2670. config.bmiss_offload_max_vdev =
  2671. __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
  2672. config.roam_offload_max_vdev =
  2673. __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
  2674. config.roam_offload_max_ap_profiles =
  2675. __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
  2676. config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
  2677. config.num_mcast_table_elems =
  2678. __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
  2679. config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
  2680. config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
  2681. config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
  2682. config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
  2683. config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
  2684. val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
  2685. config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
  2686. config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
  2687. config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
  2688. config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
  2689. len = sizeof(*cmd) +
  2690. (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
  2691. buf = ath10k_wmi_alloc_skb(ar, len);
  2692. if (!buf)
  2693. return -ENOMEM;
  2694. cmd = (struct wmi_init_cmd_10x *)buf->data;
  2695. if (ar->wmi.num_mem_chunks == 0) {
  2696. cmd->num_host_mem_chunks = 0;
  2697. goto out;
  2698. }
  2699. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
  2700. ar->wmi.num_mem_chunks);
  2701. cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
  2702. for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
  2703. cmd->host_mem_chunks[i].ptr =
  2704. __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
  2705. cmd->host_mem_chunks[i].size =
  2706. __cpu_to_le32(ar->wmi.mem_chunks[i].len);
  2707. cmd->host_mem_chunks[i].req_id =
  2708. __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
  2709. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2710. "wmi chunk %d len %d requested, addr 0x%llx\n",
  2711. i,
  2712. ar->wmi.mem_chunks[i].len,
  2713. (unsigned long long)ar->wmi.mem_chunks[i].paddr);
  2714. }
  2715. out:
  2716. memcpy(&cmd->resource_config, &config, sizeof(config));
  2717. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
  2718. return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
  2719. }
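/* 10.2 init command: the 10.x resource config is reused as the common
 * part of the 10.2 structure.
 */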
  2720. static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
  2721. {
  2722. struct wmi_init_cmd_10_2 *cmd;
  2723. struct sk_buff *buf;
  2724. struct wmi_resource_config_10x config = {};
  2725. u32 len, val;
  2726. int i;
  2727. config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
  2728. config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
  2729. config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
  2730. config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
  2731. config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
  2732. config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
  2733. config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
  2734. config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
  2735. config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
  2736. config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
  2737. config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
  2738. config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
  2739. config.scan_max_pending_reqs =
  2740. __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
  2741. config.bmiss_offload_max_vdev =
  2742. __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
  2743. config.roam_offload_max_vdev =
  2744. __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
  2745. config.roam_offload_max_ap_profiles =
  2746. __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
  2747. config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
  2748. config.num_mcast_table_elems =
  2749. __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
  2750. config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
  2751. config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
  2752. config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
  2753. config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
  2754. config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
  2755. val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
  2756. config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
  2757. config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
  2758. config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
  2759. config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
  2760. len = sizeof(*cmd) +
  2761. (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
  2762. buf = ath10k_wmi_alloc_skb(ar, len);
  2763. if (!buf)
  2764. return -ENOMEM;
  2765. cmd = (struct wmi_init_cmd_10_2 *)buf->data;
  2766. if (ar->wmi.num_mem_chunks == 0) {
  2767. cmd->num_host_mem_chunks = 0;
  2768. goto out;
  2769. }
  2770. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
  2771. ar->wmi.num_mem_chunks);
  2772. cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
  2773. for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
  2774. cmd->host_mem_chunks[i].ptr =
  2775. __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
  2776. cmd->host_mem_chunks[i].size =
  2777. __cpu_to_le32(ar->wmi.mem_chunks[i].len);
  2778. cmd->host_mem_chunks[i].req_id =
  2779. __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
  2780. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2781. "wmi chunk %d len %d requested, addr 0x%llx\n",
  2782. i,
  2783. ar->wmi.mem_chunks[i].len,
  2784. (unsigned long long)ar->wmi.mem_chunks[i].paddr);
  2785. }
  2786. out:
  2787. memcpy(&cmd->resource_config.common, &config, sizeof(config));
  2788. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
  2789. return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
  2790. }
  2791. int ath10k_wmi_cmd_init(struct ath10k *ar)
  2792. {
  2793. int ret;
  2794. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  2795. if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
  2796. ret = ath10k_wmi_10_2_cmd_init(ar);
  2797. else
  2798. ret = ath10k_wmi_10x_cmd_init(ar);
  2799. } else {
  2800. ret = ath10k_wmi_main_cmd_init(ar);
  2801. }
  2802. return ret;
  2803. }
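/* Work out the total start scan command length: the fixed part plus
 * the optional IE, channel, SSID and BSSID TLVs that follow it.
 */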
  2804. static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
  2805. const struct wmi_start_scan_arg *arg)
  2806. {
  2807. int len;
  2808. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
  2809. len = sizeof(struct wmi_start_scan_cmd_10x);
  2810. else
  2811. len = sizeof(struct wmi_start_scan_cmd);
  2812. if (arg->ie_len) {
  2813. if (!arg->ie)
  2814. return -EINVAL;
  2815. if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
  2816. return -EINVAL;
  2817. len += sizeof(struct wmi_ie_data);
  2818. len += roundup(arg->ie_len, 4);
  2819. }
  2820. if (arg->n_channels) {
  2821. if (!arg->channels)
  2822. return -EINVAL;
  2823. if (arg->n_channels > ARRAY_SIZE(arg->channels))
  2824. return -EINVAL;
  2825. len += sizeof(struct wmi_chan_list);
  2826. len += sizeof(__le32) * arg->n_channels;
  2827. }
  2828. if (arg->n_ssids) {
  2829. if (!arg->ssids)
  2830. return -EINVAL;
  2831. if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
  2832. return -EINVAL;
  2833. len += sizeof(struct wmi_ssid_list);
  2834. len += sizeof(struct wmi_ssid) * arg->n_ssids;
  2835. }
  2836. if (arg->n_bssids) {
  2837. if (!arg->bssids)
  2838. return -EINVAL;
  2839. if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
  2840. return -EINVAL;
  2841. len += sizeof(struct wmi_bssid_list);
  2842. len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
  2843. }
  2844. return len;
  2845. }
  2846. int ath10k_wmi_start_scan(struct ath10k *ar,
  2847. const struct wmi_start_scan_arg *arg)
  2848. {
  2849. struct wmi_start_scan_cmd *cmd;
  2850. struct sk_buff *skb;
  2851. struct wmi_ie_data *ie;
  2852. struct wmi_chan_list *channels;
  2853. struct wmi_ssid_list *ssids;
  2854. struct wmi_bssid_list *bssids;
  2855. u32 scan_id;
  2856. u32 scan_req_id;
  2857. int off;
  2858. int len = 0;
  2859. int i;
  2860. len = ath10k_wmi_start_scan_calc_len(ar, arg);
  2861. if (len < 0)
  2862. return len; /* len contains error code here */
  2863. skb = ath10k_wmi_alloc_skb(ar, len);
  2864. if (!skb)
  2865. return -ENOMEM;
  2866. scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
  2867. scan_id |= arg->scan_id;
  2868. scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
  2869. scan_req_id |= arg->scan_req_id;
  2870. cmd = (struct wmi_start_scan_cmd *)skb->data;
  2871. cmd->scan_id = __cpu_to_le32(scan_id);
  2872. cmd->scan_req_id = __cpu_to_le32(scan_req_id);
  2873. cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  2874. cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
  2875. cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
  2876. cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
  2877. cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
  2878. cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time);
  2879. cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time);
  2880. cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
  2881. cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
  2882. cmd->idle_time = __cpu_to_le32(arg->idle_time);
  2883. cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time);
  2884. cmd->probe_delay = __cpu_to_le32(arg->probe_delay);
  2885. cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
/* TLV list starts after fields included in the struct */
/* There's just one field that differs between the two start_scan
 * structures - burst_duration, which we are not using anyway, so
 * there's no point in splitting the code path here; just shift the
 * buffer offset to match the structure used by the given firmware.
 */
  2891. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
  2892. off = sizeof(struct wmi_start_scan_cmd_10x);
  2893. else
  2894. off = sizeof(struct wmi_start_scan_cmd);
  2895. if (arg->n_channels) {
  2896. channels = (void *)skb->data + off;
  2897. channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
  2898. channels->num_chan = __cpu_to_le32(arg->n_channels);
  2899. for (i = 0; i < arg->n_channels; i++)
  2900. channels->channel_list[i].freq =
  2901. __cpu_to_le16(arg->channels[i]);
  2902. off += sizeof(*channels);
  2903. off += sizeof(__le32) * arg->n_channels;
  2904. }
  2905. if (arg->n_ssids) {
  2906. ssids = (void *)skb->data + off;
  2907. ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
  2908. ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
  2909. for (i = 0; i < arg->n_ssids; i++) {
  2910. ssids->ssids[i].ssid_len =
  2911. __cpu_to_le32(arg->ssids[i].len);
  2912. memcpy(&ssids->ssids[i].ssid,
  2913. arg->ssids[i].ssid,
  2914. arg->ssids[i].len);
  2915. }
  2916. off += sizeof(*ssids);
  2917. off += sizeof(struct wmi_ssid) * arg->n_ssids;
  2918. }
  2919. if (arg->n_bssids) {
  2920. bssids = (void *)skb->data + off;
  2921. bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
  2922. bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
  2923. for (i = 0; i < arg->n_bssids; i++)
  2924. memcpy(&bssids->bssid_list[i],
  2925. arg->bssids[i].bssid,
  2926. ETH_ALEN);
  2927. off += sizeof(*bssids);
  2928. off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
  2929. }
  2930. if (arg->ie_len) {
  2931. ie = (void *)skb->data + off;
  2932. ie->tag = __cpu_to_le32(WMI_IE_TAG);
  2933. ie->ie_len = __cpu_to_le32(arg->ie_len);
  2934. memcpy(ie->ie_data, arg->ie, arg->ie_len);
  2935. off += sizeof(*ie);
  2936. off += roundup(arg->ie_len, 4);
  2937. }
  2938. if (off != skb->len) {
  2939. dev_kfree_skb(skb);
  2940. return -EINVAL;
  2941. }
  2942. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
  2943. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
  2944. }
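/* Fill a start scan request with commonly used defaults: dwell and
 * rest times, the scan events to be notified of and a broadcast BSSID.
 */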
  2945. void ath10k_wmi_start_scan_init(struct ath10k *ar,
  2946. struct wmi_start_scan_arg *arg)
  2947. {
  2948. /* setup commonly used values */
  2949. arg->scan_req_id = 1;
  2950. arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
  2951. arg->dwell_time_active = 50;
  2952. arg->dwell_time_passive = 150;
  2953. arg->min_rest_time = 50;
  2954. arg->max_rest_time = 500;
  2955. arg->repeat_probe_time = 0;
  2956. arg->probe_spacing_time = 0;
  2957. arg->idle_time = 0;
  2958. arg->max_scan_time = 20000;
  2959. arg->probe_delay = 5;
  2960. arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
  2961. | WMI_SCAN_EVENT_COMPLETED
  2962. | WMI_SCAN_EVENT_BSS_CHANNEL
  2963. | WMI_SCAN_EVENT_FOREIGN_CHANNEL
  2964. | WMI_SCAN_EVENT_DEQUEUED;
  2965. arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
  2966. arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
  2967. arg->n_bssids = 1;
  2968. arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
  2969. }
  2970. int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
  2971. {
  2972. struct wmi_stop_scan_cmd *cmd;
  2973. struct sk_buff *skb;
  2974. u32 scan_id;
  2975. u32 req_id;
  2976. if (arg->req_id > 0xFFF)
  2977. return -EINVAL;
  2978. if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
  2979. return -EINVAL;
  2980. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  2981. if (!skb)
  2982. return -ENOMEM;
  2983. scan_id = arg->u.scan_id;
  2984. scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
  2985. req_id = arg->req_id;
  2986. req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
  2987. cmd = (struct wmi_stop_scan_cmd *)skb->data;
  2988. cmd->req_type = __cpu_to_le32(arg->req_type);
  2989. cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
  2990. cmd->scan_id = __cpu_to_le32(scan_id);
  2991. cmd->scan_req_id = __cpu_to_le32(req_id);
  2992. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2993. "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
  2994. arg->req_id, arg->req_type, arg->u.scan_id);
  2995. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
  2996. }
  2997. int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
  2998. enum wmi_vdev_type type,
  2999. enum wmi_vdev_subtype subtype,
  3000. const u8 macaddr[ETH_ALEN])
  3001. {
  3002. struct wmi_vdev_create_cmd *cmd;
  3003. struct sk_buff *skb;
  3004. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3005. if (!skb)
  3006. return -ENOMEM;
  3007. cmd = (struct wmi_vdev_create_cmd *)skb->data;
  3008. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3009. cmd->vdev_type = __cpu_to_le32(type);
  3010. cmd->vdev_subtype = __cpu_to_le32(subtype);
  3011. ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
  3012. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3013. "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
  3014. vdev_id, type, subtype, macaddr);
  3015. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
  3016. }
  3017. int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
  3018. {
  3019. struct wmi_vdev_delete_cmd *cmd;
  3020. struct sk_buff *skb;
  3021. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3022. if (!skb)
  3023. return -ENOMEM;
  3024. cmd = (struct wmi_vdev_delete_cmd *)skb->data;
  3025. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3026. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3027. "WMI vdev delete id %d\n", vdev_id);
  3028. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
  3029. }
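/* Common helper for vdev start and restart: both commands use the same
 * request layout and differ only in the command id.
 */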
  3030. static int
  3031. ath10k_wmi_vdev_start_restart(struct ath10k *ar,
  3032. const struct wmi_vdev_start_request_arg *arg,
  3033. u32 cmd_id)
  3034. {
  3035. struct wmi_vdev_start_request_cmd *cmd;
  3036. struct sk_buff *skb;
  3037. const char *cmdname;
  3038. u32 flags = 0;
  3039. u32 ch_flags = 0;
  3040. if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
  3041. cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
  3042. return -EINVAL;
  3043. if (WARN_ON(arg->ssid && arg->ssid_len == 0))
  3044. return -EINVAL;
  3045. if (WARN_ON(arg->hidden_ssid && !arg->ssid))
  3046. return -EINVAL;
  3047. if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
  3048. return -EINVAL;
  3049. if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)
  3050. cmdname = "start";
  3051. else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
  3052. cmdname = "restart";
  3053. else
  3054. return -EINVAL; /* should not happen, we already check cmd_id */
  3055. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3056. if (!skb)
  3057. return -ENOMEM;
  3058. if (arg->hidden_ssid)
  3059. flags |= WMI_VDEV_START_HIDDEN_SSID;
  3060. if (arg->pmf_enabled)
  3061. flags |= WMI_VDEV_START_PMF_ENABLED;
  3062. if (arg->channel.chan_radar)
  3063. ch_flags |= WMI_CHAN_FLAG_DFS;
  3064. cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
  3065. cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  3066. cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
  3067. cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
  3068. cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
  3069. cmd->flags = __cpu_to_le32(flags);
  3070. cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
  3071. cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
  3072. if (arg->ssid) {
  3073. cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
  3074. memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
  3075. }
  3076. cmd->chan.mhz = __cpu_to_le32(arg->channel.freq);
  3077. cmd->chan.band_center_freq1 =
  3078. __cpu_to_le32(arg->channel.band_center_freq1);
  3079. cmd->chan.mode = arg->channel.mode;
  3080. cmd->chan.flags |= __cpu_to_le32(ch_flags);
  3081. cmd->chan.min_power = arg->channel.min_power;
  3082. cmd->chan.max_power = arg->channel.max_power;
  3083. cmd->chan.reg_power = arg->channel.max_reg_power;
  3084. cmd->chan.reg_classid = arg->channel.reg_class_id;
  3085. cmd->chan.antenna_max = arg->channel.max_antenna_gain;
  3086. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3087. "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
  3088. cmdname, arg->vdev_id,
  3089. flags, arg->channel.freq, arg->channel.mode,
  3090. cmd->chan.flags, arg->channel.max_power);
  3091. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  3092. }
  3093. int ath10k_wmi_vdev_start(struct ath10k *ar,
  3094. const struct wmi_vdev_start_request_arg *arg)
  3095. {
  3096. u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;
  3097. return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
  3098. }
  3099. int ath10k_wmi_vdev_restart(struct ath10k *ar,
  3100. const struct wmi_vdev_start_request_arg *arg)
  3101. {
  3102. u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;
  3103. return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
  3104. }
  3105. int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
  3106. {
  3107. struct wmi_vdev_stop_cmd *cmd;
  3108. struct sk_buff *skb;
  3109. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3110. if (!skb)
  3111. return -ENOMEM;
  3112. cmd = (struct wmi_vdev_stop_cmd *)skb->data;
  3113. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3114. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
  3115. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
  3116. }
  3117. int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
  3118. {
  3119. struct wmi_vdev_up_cmd *cmd;
  3120. struct sk_buff *skb;
  3121. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3122. if (!skb)
  3123. return -ENOMEM;
  3124. cmd = (struct wmi_vdev_up_cmd *)skb->data;
  3125. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3126. cmd->vdev_assoc_id = __cpu_to_le32(aid);
  3127. ether_addr_copy(cmd->vdev_bssid.addr, bssid);
  3128. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3129. "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
  3130. vdev_id, aid, bssid);
  3131. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
  3132. }
  3133. int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
  3134. {
  3135. struct wmi_vdev_down_cmd *cmd;
  3136. struct sk_buff *skb;
  3137. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3138. if (!skb)
  3139. return -ENOMEM;
  3140. cmd = (struct wmi_vdev_down_cmd *)skb->data;
  3141. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3142. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3143. "wmi mgmt vdev down id 0x%x\n", vdev_id);
  3144. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
  3145. }
  3146. int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
  3147. u32 param_id, u32 param_value)
  3148. {
  3149. struct wmi_vdev_set_param_cmd *cmd;
  3150. struct sk_buff *skb;
  3151. if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
  3152. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3153. "vdev param %d not supported by firmware\n",
  3154. param_id);
  3155. return -EOPNOTSUPP;
  3156. }
  3157. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3158. if (!skb)
  3159. return -ENOMEM;
  3160. cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
  3161. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3162. cmd->param_id = __cpu_to_le32(param_id);
  3163. cmd->param_value = __cpu_to_le32(param_value);
  3164. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3165. "wmi vdev id 0x%x set param %d value %d\n",
  3166. vdev_id, param_id, param_value);
  3167. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
  3168. }
  3169. int ath10k_wmi_vdev_install_key(struct ath10k *ar,
  3170. const struct wmi_vdev_install_key_arg *arg)
  3171. {
  3172. struct wmi_vdev_install_key_cmd *cmd;
  3173. struct sk_buff *skb;
  3174. if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
  3175. return -EINVAL;
  3176. if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
  3177. return -EINVAL;
  3178. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
  3179. if (!skb)
  3180. return -ENOMEM;
  3181. cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
  3182. cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  3183. cmd->key_idx = __cpu_to_le32(arg->key_idx);
  3184. cmd->key_flags = __cpu_to_le32(arg->key_flags);
  3185. cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
  3186. cmd->key_len = __cpu_to_le32(arg->key_len);
  3187. cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
  3188. cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
  3189. if (arg->macaddr)
  3190. ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
  3191. if (arg->key_data)
  3192. memcpy(cmd->key_data, arg->key_data, arg->key_len);
  3193. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3194. "wmi vdev install key idx %d cipher %d len %d\n",
  3195. arg->key_idx, arg->key_cipher, arg->key_len);
  3196. return ath10k_wmi_cmd_send(ar, skb,
  3197. ar->wmi.cmd->vdev_install_key_cmdid);
  3198. }
  3199. int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
  3200. const struct wmi_vdev_spectral_conf_arg *arg)
  3201. {
  3202. struct wmi_vdev_spectral_conf_cmd *cmd;
  3203. struct sk_buff *skb;
  3204. u32 cmdid;
  3205. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3206. if (!skb)
  3207. return -ENOMEM;
  3208. cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
  3209. cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  3210. cmd->scan_count = __cpu_to_le32(arg->scan_count);
  3211. cmd->scan_period = __cpu_to_le32(arg->scan_period);
  3212. cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
  3213. cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
  3214. cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
  3215. cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
  3216. cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
  3217. cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
  3218. cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
  3219. cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
  3220. cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
  3221. cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
  3222. cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
  3223. cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
  3224. cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
  3225. cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
  3226. cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
  3227. cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
  3228. cmdid = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
  3229. return ath10k_wmi_cmd_send(ar, skb, cmdid);
  3230. }
  3231. int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
  3232. u32 enable)
  3233. {
  3234. struct wmi_vdev_spectral_enable_cmd *cmd;
  3235. struct sk_buff *skb;
  3236. u32 cmdid;
  3237. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3238. if (!skb)
  3239. return -ENOMEM;
  3240. cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
  3241. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3242. cmd->trigger_cmd = __cpu_to_le32(trigger);
  3243. cmd->enable_cmd = __cpu_to_le32(enable);
  3244. cmdid = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
  3245. return ath10k_wmi_cmd_send(ar, skb, cmdid);
  3246. }
  3247. int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
  3248. const u8 peer_addr[ETH_ALEN])
  3249. {
  3250. struct wmi_peer_create_cmd *cmd;
  3251. struct sk_buff *skb;
  3252. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3253. if (!skb)
  3254. return -ENOMEM;
  3255. cmd = (struct wmi_peer_create_cmd *)skb->data;
  3256. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3257. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  3258. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3259. "wmi peer create vdev_id %d peer_addr %pM\n",
  3260. vdev_id, peer_addr);
  3261. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
  3262. }
  3263. int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
  3264. const u8 peer_addr[ETH_ALEN])
  3265. {
  3266. struct wmi_peer_delete_cmd *cmd;
  3267. struct sk_buff *skb;
  3268. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3269. if (!skb)
  3270. return -ENOMEM;
  3271. cmd = (struct wmi_peer_delete_cmd *)skb->data;
  3272. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3273. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  3274. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3275. "wmi peer delete vdev_id %d peer_addr %pM\n",
  3276. vdev_id, peer_addr);
  3277. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
  3278. }
  3279. int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
  3280. const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
  3281. {
  3282. struct wmi_peer_flush_tids_cmd *cmd;
  3283. struct sk_buff *skb;
  3284. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3285. if (!skb)
  3286. return -ENOMEM;
  3287. cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
  3288. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3289. cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
  3290. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  3291. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3292. "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
  3293. vdev_id, peer_addr, tid_bitmap);
  3294. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
  3295. }
  3296. int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
  3297. const u8 *peer_addr, enum wmi_peer_param param_id,
  3298. u32 param_value)
  3299. {
  3300. struct wmi_peer_set_param_cmd *cmd;
  3301. struct sk_buff *skb;
  3302. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3303. if (!skb)
  3304. return -ENOMEM;
  3305. cmd = (struct wmi_peer_set_param_cmd *)skb->data;
  3306. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3307. cmd->param_id = __cpu_to_le32(param_id);
  3308. cmd->param_value = __cpu_to_le32(param_value);
  3309. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  3310. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3311. "wmi vdev %d peer 0x%pM set param %d value %d\n",
  3312. vdev_id, peer_addr, param_id, param_value);
  3313. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
  3314. }
  3315. int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
  3316. enum wmi_sta_ps_mode psmode)
  3317. {
  3318. struct wmi_sta_powersave_mode_cmd *cmd;
  3319. struct sk_buff *skb;
  3320. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3321. if (!skb)
  3322. return -ENOMEM;
  3323. cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
  3324. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3325. cmd->sta_ps_mode = __cpu_to_le32(psmode);
  3326. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3327. "wmi set powersave id 0x%x mode %d\n",
  3328. vdev_id, psmode);
  3329. return ath10k_wmi_cmd_send(ar, skb,
  3330. ar->wmi.cmd->sta_powersave_mode_cmdid);
  3331. }
  3332. int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
  3333. enum wmi_sta_powersave_param param_id,
  3334. u32 value)
  3335. {
  3336. struct wmi_sta_powersave_param_cmd *cmd;
  3337. struct sk_buff *skb;
  3338. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3339. if (!skb)
  3340. return -ENOMEM;
  3341. cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
  3342. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3343. cmd->param_id = __cpu_to_le32(param_id);
  3344. cmd->param_value = __cpu_to_le32(value);
  3345. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3346. "wmi sta ps param vdev_id 0x%x param %d value %d\n",
  3347. vdev_id, param_id, value);
  3348. return ath10k_wmi_cmd_send(ar, skb,
  3349. ar->wmi.cmd->sta_powersave_param_cmdid);
  3350. }
  3351. int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  3352. enum wmi_ap_ps_peer_param param_id, u32 value)
  3353. {
  3354. struct wmi_ap_ps_peer_cmd *cmd;
  3355. struct sk_buff *skb;
  3356. if (!mac)
  3357. return -EINVAL;
  3358. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3359. if (!skb)
  3360. return -ENOMEM;
  3361. cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
  3362. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3363. cmd->param_id = __cpu_to_le32(param_id);
  3364. cmd->param_value = __cpu_to_le32(value);
  3365. ether_addr_copy(cmd->peer_macaddr.addr, mac);
  3366. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3367. "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
  3368. vdev_id, param_id, value, mac);
  3369. return ath10k_wmi_cmd_send(ar, skb,
  3370. ar->wmi.cmd->ap_ps_peer_param_cmdid);
  3371. }
  3372. int ath10k_wmi_scan_chan_list(struct ath10k *ar,
  3373. const struct wmi_scan_chan_list_arg *arg)
  3374. {
  3375. struct wmi_scan_chan_list_cmd *cmd;
  3376. struct sk_buff *skb;
  3377. struct wmi_channel_arg *ch;
  3378. struct wmi_channel *ci;
  3379. int len;
  3380. int i;
  3381. len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
  3382. skb = ath10k_wmi_alloc_skb(ar, len);
  3383. if (!skb)
return -ENOMEM;
  3385. cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
  3386. cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
  3387. for (i = 0; i < arg->n_channels; i++) {
  3388. u32 flags = 0;
  3389. ch = &arg->channels[i];
  3390. ci = &cmd->chan_info[i];
  3391. if (ch->passive)
  3392. flags |= WMI_CHAN_FLAG_PASSIVE;
  3393. if (ch->allow_ibss)
  3394. flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
  3395. if (ch->allow_ht)
  3396. flags |= WMI_CHAN_FLAG_ALLOW_HT;
  3397. if (ch->allow_vht)
  3398. flags |= WMI_CHAN_FLAG_ALLOW_VHT;
  3399. if (ch->ht40plus)
  3400. flags |= WMI_CHAN_FLAG_HT40_PLUS;
  3401. if (ch->chan_radar)
  3402. flags |= WMI_CHAN_FLAG_DFS;
  3403. ci->mhz = __cpu_to_le32(ch->freq);
  3404. ci->band_center_freq1 = __cpu_to_le32(ch->freq);
  3405. ci->band_center_freq2 = 0;
  3406. ci->min_power = ch->min_power;
  3407. ci->max_power = ch->max_power;
  3408. ci->reg_power = ch->max_reg_power;
  3409. ci->antenna_max = ch->max_antenna_gain;
  3410. /* mode & flags share storage */
  3411. ci->mode = ch->mode;
  3412. ci->flags |= __cpu_to_le32(flags);
  3413. }
  3414. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
  3415. }
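/* Fill the peer assoc fields shared by all firmware flavours; the
 * _main/_10_1/_10_2 helpers below add the flavour specific parts.
 */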
  3416. static void
  3417. ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
  3418. const struct wmi_peer_assoc_complete_arg *arg)
  3419. {
  3420. struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
  3421. cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  3422. cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
  3423. cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
  3424. cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
  3425. cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
  3426. cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
  3427. cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
  3428. cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
  3429. cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
  3430. cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
  3431. cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
  3432. cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
  3433. cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);
  3434. ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
  3435. cmd->peer_legacy_rates.num_rates =
  3436. __cpu_to_le32(arg->peer_legacy_rates.num_rates);
  3437. memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
  3438. arg->peer_legacy_rates.num_rates);
  3439. cmd->peer_ht_rates.num_rates =
  3440. __cpu_to_le32(arg->peer_ht_rates.num_rates);
  3441. memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
  3442. arg->peer_ht_rates.num_rates);
  3443. cmd->peer_vht_rates.rx_max_rate =
  3444. __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
  3445. cmd->peer_vht_rates.rx_mcs_set =
  3446. __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
  3447. cmd->peer_vht_rates.tx_max_rate =
  3448. __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
  3449. cmd->peer_vht_rates.tx_mcs_set =
  3450. __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
  3451. }
  3452. static void
  3453. ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
  3454. const struct wmi_peer_assoc_complete_arg *arg)
  3455. {
  3456. struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
  3457. ath10k_wmi_peer_assoc_fill(ar, buf, arg);
  3458. memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
  3459. }
  3460. static void
  3461. ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
  3462. const struct wmi_peer_assoc_complete_arg *arg)
  3463. {
  3464. ath10k_wmi_peer_assoc_fill(ar, buf, arg);
  3465. }
  3466. static void
  3467. ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
  3468. const struct wmi_peer_assoc_complete_arg *arg)
  3469. {
  3470. struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
  3471. int max_mcs, max_nss;
  3472. u32 info0;
  3473. /* TODO: Is using max values okay with firmware? */
  3474. max_mcs = 0xf;
  3475. max_nss = 0xf;
  3476. info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
  3477. SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
  3478. ath10k_wmi_peer_assoc_fill(ar, buf, arg);
  3479. cmd->info0 = __cpu_to_le32(info0);
  3480. }
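/* Validate the association arguments, pick the command size for the
 * firmware in use and send the peer assoc command.
 */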
  3481. int ath10k_wmi_peer_assoc(struct ath10k *ar,
  3482. const struct wmi_peer_assoc_complete_arg *arg)
  3483. {
  3484. struct sk_buff *skb;
  3485. int len;
  3486. if (arg->peer_mpdu_density > 16)
  3487. return -EINVAL;
  3488. if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
  3489. return -EINVAL;
  3490. if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
  3491. return -EINVAL;
  3492. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  3493. if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
  3494. len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
  3495. else
  3496. len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
  3497. } else {
  3498. len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
  3499. }
  3500. skb = ath10k_wmi_alloc_skb(ar, len);
  3501. if (!skb)
  3502. return -ENOMEM;
  3503. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
else
ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
  3508. } else {
  3509. ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
  3510. }
  3511. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3512. "wmi peer assoc vdev %d addr %pM (%s)\n",
  3513. arg->vdev_id, arg->addr,
  3514. arg->peer_reassoc ? "reassociate" : "new");
  3515. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
  3516. }
  3517. /* This function assumes the beacon is already DMA mapped */
  3518. int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
  3519. {
  3520. struct wmi_bcn_tx_ref_cmd *cmd;
  3521. struct sk_buff *skb;
  3522. struct sk_buff *beacon = arvif->beacon;
  3523. struct ath10k *ar = arvif->ar;
  3524. struct ieee80211_hdr *hdr;
  3525. int ret;
  3526. u16 fc;
  3527. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3528. if (!skb)
  3529. return -ENOMEM;
  3530. hdr = (struct ieee80211_hdr *)beacon->data;
  3531. fc = le16_to_cpu(hdr->frame_control);
  3532. cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
  3533. cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
  3534. cmd->data_len = __cpu_to_le32(beacon->len);
  3535. cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr);
  3536. cmd->msdu_id = 0;
  3537. cmd->frame_control = __cpu_to_le32(fc);
  3538. cmd->flags = 0;
  3539. cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
  3540. if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
  3541. cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
  3542. if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab)
  3543. cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
  3544. ret = ath10k_wmi_cmd_send_nowait(ar, skb,
  3545. ar->wmi.cmd->pdev_send_bcn_cmdid);
  3546. if (ret)
  3547. dev_kfree_skb(skb);
  3548. return ret;
  3549. }
  3550. static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
  3551. const struct wmi_wmm_params_arg *arg)
  3552. {
  3553. params->cwmin = __cpu_to_le32(arg->cwmin);
  3554. params->cwmax = __cpu_to_le32(arg->cwmax);
  3555. params->aifs = __cpu_to_le32(arg->aifs);
  3556. params->txop = __cpu_to_le32(arg->txop);
  3557. params->acm = __cpu_to_le32(arg->acm);
  3558. params->no_ack = __cpu_to_le32(arg->no_ack);
  3559. }
  3560. int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
  3561. const struct wmi_pdev_set_wmm_params_arg *arg)
  3562. {
  3563. struct wmi_pdev_set_wmm_params *cmd;
  3564. struct sk_buff *skb;
  3565. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3566. if (!skb)
  3567. return -ENOMEM;
  3568. cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
  3569. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
  3570. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
  3571. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
  3572. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
  3573. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
  3574. return ath10k_wmi_cmd_send(ar, skb,
  3575. ar->wmi.cmd->pdev_set_wmm_params_cmdid);
  3576. }
  3577. int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
  3578. {
  3579. struct wmi_request_stats_cmd *cmd;
  3580. struct sk_buff *skb;
  3581. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3582. if (!skb)
  3583. return -ENOMEM;
  3584. cmd = (struct wmi_request_stats_cmd *)skb->data;
  3585. cmd->stats_id = __cpu_to_le32(stats_id);
  3586. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
  3587. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
  3588. }
  3589. int ath10k_wmi_force_fw_hang(struct ath10k *ar,
  3590. enum wmi_force_fw_hang_type type, u32 delay_ms)
  3591. {
  3592. struct wmi_force_fw_hang_cmd *cmd;
  3593. struct sk_buff *skb;
  3594. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3595. if (!skb)
  3596. return -ENOMEM;
  3597. cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
  3598. cmd->type = __cpu_to_le32(type);
  3599. cmd->delay_ms = __cpu_to_le32(delay_ms);
  3600. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
  3601. type, delay_ms);
  3602. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
  3603. }
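/* Configure firmware debug logging: a non-zero module_enable selects
 * verbose logging for the given modules, zero restores the default
 * WARN level for all modules.
 */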
  3604. int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
  3605. {
  3606. struct wmi_dbglog_cfg_cmd *cmd;
  3607. struct sk_buff *skb;
  3608. u32 cfg;
  3609. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3610. if (!skb)
  3611. return -ENOMEM;
  3612. cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
  3613. if (module_enable) {
  3614. cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE,
  3615. ATH10K_DBGLOG_CFG_LOG_LVL);
  3616. } else {
  3617. /* set back defaults, all modules with WARN level */
  3618. cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
  3619. ATH10K_DBGLOG_CFG_LOG_LVL);
  3620. module_enable = ~0;
  3621. }
  3622. cmd->module_enable = __cpu_to_le32(module_enable);
  3623. cmd->module_valid = __cpu_to_le32(~0);
  3624. cmd->config_enable = __cpu_to_le32(cfg);
  3625. cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
  3626. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3627. "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
  3628. __le32_to_cpu(cmd->module_enable),
  3629. __le32_to_cpu(cmd->module_valid),
  3630. __le32_to_cpu(cmd->config_enable),
  3631. __le32_to_cpu(cmd->config_valid));
  3632. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
  3633. }