wmi.c 136 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/ctype.h>

#include "core.h"
#include "htc.h"
#include "debug.h"
#include "wmi.h"
#include "mac.h"
#include "testmode.h"
  25. /* MAIN WMI cmd track */
  26. static struct wmi_cmd_map wmi_cmd_map = {
  27. .init_cmdid = WMI_INIT_CMDID,
  28. .start_scan_cmdid = WMI_START_SCAN_CMDID,
  29. .stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
  30. .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
  31. .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
  32. .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
  33. .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
  34. .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
  35. .pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
  36. .pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
  37. .pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
  38. .pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
  39. .pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
  40. .pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
  41. .pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
  42. .pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
  43. .pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
  44. .pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
  45. .vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
  46. .vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
  47. .vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
  48. .vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
  49. .vdev_up_cmdid = WMI_VDEV_UP_CMDID,
  50. .vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
  51. .vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
  52. .vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
  53. .vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
  54. .peer_create_cmdid = WMI_PEER_CREATE_CMDID,
  55. .peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
  56. .peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
  57. .peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
  58. .peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
  59. .peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
  60. .peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
  61. .peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
  62. .bcn_tx_cmdid = WMI_BCN_TX_CMDID,
  63. .pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
  64. .bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
  65. .bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
  66. .prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
  67. .mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
  68. .prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
  69. .addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
  70. .addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
  71. .addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
  72. .delba_send_cmdid = WMI_DELBA_SEND_CMDID,
  73. .addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
  74. .send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
  75. .sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
  76. .sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
  77. .sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
  78. .pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
  79. .pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
  80. .roam_scan_mode = WMI_ROAM_SCAN_MODE,
  81. .roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
  82. .roam_scan_period = WMI_ROAM_SCAN_PERIOD,
  83. .roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
  84. .roam_ap_profile = WMI_ROAM_AP_PROFILE,
  85. .ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
  86. .ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
  87. .ofl_scan_period = WMI_OFL_SCAN_PERIOD,
  88. .p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
  89. .p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
  90. .p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
  91. .p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
  92. .p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
  93. .ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
  94. .ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
  95. .peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
  96. .wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
  97. .wlan_profile_set_hist_intvl_cmdid =
  98. WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
  99. .wlan_profile_get_profile_data_cmdid =
  100. WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
  101. .wlan_profile_enable_profile_id_cmdid =
  102. WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
  103. .wlan_profile_list_profile_id_cmdid =
  104. WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
  105. .pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
  106. .pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
  107. .add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
  108. .rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
  109. .wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
  110. .wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
  111. .wow_enable_disable_wake_event_cmdid =
  112. WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
  113. .wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
  114. .wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
  115. .rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
  116. .rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
  117. .vdev_spectral_scan_configure_cmdid =
  118. WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
  119. .vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
  120. .request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
  121. .set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
  122. .network_list_offload_config_cmdid =
  123. WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
  124. .gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
  125. .csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
  126. .csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
  127. .chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
  128. .peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
  129. .peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
  130. .sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
  131. .sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
  132. .sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
  133. .echo_cmdid = WMI_ECHO_CMDID,
  134. .pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
  135. .dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
  136. .pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
  137. .pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
  138. .vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
  139. .vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
  140. .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
  141. .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
  142. .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
  143. };
/* 10.X WMI cmd track
 *
 * Translation table from the driver's abstract command identifiers to the
 * numeric command IDs used by 10.x firmware (WMI_10X_* constants from
 * wmi.h).  Fields set to WMI_CMD_UNSUPPORTED are commands the 10.x
 * firmware interface does not provide; callers are expected to check for
 * that sentinel before issuing the command.
 */
static struct wmi_cmd_map wmi_10x_cmd_map = {
	.init_cmdid = WMI_10X_INIT_CMDID,
	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
	/* 10.x firmware has no beacon/probe template commands */
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
	/* host-offload features below are absent from 10.x firmware */
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10X_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
};
/* MAIN WMI VDEV param map
 *
 * Maps the driver's firmware-agnostic vdev parameter handles onto the
 * WMI_VDEV_PARAM_* identifiers used by "main" branch firmware. Handles
 * the firmware does not implement are mapped to
 * WMI_VDEV_PARAM_UNSUPPORTED so callers can detect and skip them.
 */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
			WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_VDEV_PARAM_WDS,
	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_VDEV_PARAM_SGI,
	.ldpc = WMI_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
	/* mcast-to-ucast conversion is a 10.x-only feature */
	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
			WMI_VDEV_PARAM_UNSUPPORTED,
};
/* 10.X WMI VDEV param map
 *
 * Same role as wmi_vdev_param_map but for 10.x firmware, which uses a
 * different numbering (WMI_10X_VDEV_PARAM_*) and lacks a few parameters
 * (marked WMI_VDEV_PARAM_UNSUPPORTED below).
 */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
			WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	/* 10.x has no separate first/final beacon-miss counts */
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	/* txbf/powersave/encap knobs are main-firmware only */
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
};
  381. static struct wmi_pdev_param_map wmi_pdev_param_map = {
  382. .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
  383. .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
  384. .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
  385. .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
  386. .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
  387. .beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
  388. .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
  389. .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
  390. .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
  391. .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
  392. .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
  393. .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
  394. .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
  395. .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
  396. .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
  397. .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
  398. .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
  399. .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
  400. .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
  401. .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
  402. .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
  403. .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
  404. .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
  405. .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
  406. .dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
  407. .pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
  408. .pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
  409. .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
  410. .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
  411. .pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
  412. .vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
  413. .peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
  414. .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
  415. .pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
  416. .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
  417. .dcs = WMI_PDEV_PARAM_DCS,
  418. .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
  419. .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
  420. .ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
  421. .ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
  422. .ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
  423. .dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
  424. .proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
  425. .idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
  426. .power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
  427. .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
  428. .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
  429. .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
  430. };
/* 10.X WMI PDEV param map
 *
 * Same role as wmi_pdev_param_map but for 10.x firmware, which uses the
 * WMI_10X_PDEV_PARAM_* numbering and lacks a few parameters (marked
 * WMI_PDEV_PARAM_UNSUPPORTED below).
 */
static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
	/* PCIe low-power txbuf tuning is main-firmware only */
	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
	/* note: 10.x combines ARP and DHCP AC override in one param */
	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
	.dcs = WMI_10X_PDEV_PARAM_DCS,
	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
};
/* firmware 10.2 specific mappings
 *
 * Translates the driver's firmware-agnostic WMI command handles into the
 * WMI_10_2_* command IDs used by 10.2 firmware. Commands the firmware
 * does not provide are mapped to WMI_CMD_UNSUPPORTED;
 * ath10k_wmi_cmd_send() rejects those with -EOPNOTSUPP.
 */
static struct wmi_cmd_map wmi_10_2_cmd_map = {
	.init_cmdid = WMI_10_2_INIT_CMDID,
	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_2_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
};
  602. static void
  603. ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
  604. const struct wmi_channel_arg *arg)
  605. {
  606. u32 flags = 0;
  607. memset(ch, 0, sizeof(*ch));
  608. if (arg->passive)
  609. flags |= WMI_CHAN_FLAG_PASSIVE;
  610. if (arg->allow_ibss)
  611. flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
  612. if (arg->allow_ht)
  613. flags |= WMI_CHAN_FLAG_ALLOW_HT;
  614. if (arg->allow_vht)
  615. flags |= WMI_CHAN_FLAG_ALLOW_VHT;
  616. if (arg->ht40plus)
  617. flags |= WMI_CHAN_FLAG_HT40_PLUS;
  618. if (arg->chan_radar)
  619. flags |= WMI_CHAN_FLAG_DFS;
  620. ch->mhz = __cpu_to_le32(arg->freq);
  621. ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
  622. ch->band_center_freq2 = 0;
  623. ch->min_power = arg->min_power;
  624. ch->max_power = arg->max_power;
  625. ch->reg_power = arg->max_reg_power;
  626. ch->antenna_max = arg->max_antenna_gain;
  627. /* mode & flags share storage */
  628. ch->mode = arg->mode;
  629. ch->flags |= __cpu_to_le32(flags);
  630. }
  631. int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
  632. {
  633. int ret;
  634. ret = wait_for_completion_timeout(&ar->wmi.service_ready,
  635. WMI_SERVICE_READY_TIMEOUT_HZ);
  636. return ret;
  637. }
  638. int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
  639. {
  640. int ret;
  641. ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
  642. WMI_UNIFIED_READY_TIMEOUT_HZ);
  643. return ret;
  644. }
  645. struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
  646. {
  647. struct sk_buff *skb;
  648. u32 round_len = roundup(len, 4);
  649. skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
  650. if (!skb)
  651. return NULL;
  652. skb_reserve(skb, WMI_SKB_HEADROOM);
  653. if (!IS_ALIGNED((unsigned long)skb->data, 4))
  654. ath10k_warn(ar, "Unaligned WMI skb\n");
  655. skb_put(skb, round_len);
  656. memset(skb->data, 0, round_len);
  657. return skb;
  658. }
/* HTC tx-complete callback for the WMI endpoint: the command skb is no
 * longer needed once the transport is done with it, so just free it.
 */
static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}
/* Prepend a WMI command header to @skb and hand it to HTC without
 * sleeping.
 *
 * On failure the header push is undone so the caller may retry with the
 * same skb (ath10k_wmi_cmd_send() relies on this for its -EAGAIN retry
 * loop). Returns 0 on success or a negative errno from HTC.
 */
static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;
	u32 cmd = 0;

	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return -ENOMEM;

	/* Pack the command id into the header word. */
	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = __cpu_to_le32(cmd);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
	/* Trace after the send attempt so the result code is included. */
	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	/* Restore the skb to its pre-push state for a possible retry. */
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}
  685. static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
  686. {
  687. int ret;
  688. lockdep_assert_held(&arvif->ar->data_lock);
  689. if (arvif->beacon == NULL)
  690. return;
  691. if (arvif->beacon_sent)
  692. return;
  693. ret = ath10k_wmi_beacon_send_ref_nowait(arvif);
  694. if (ret)
  695. return;
  696. /* We need to retain the arvif->beacon reference for DMA unmapping and
  697. * freeing the skbuff later. */
  698. arvif->beacon_sent = true;
  699. }
  700. static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
  701. struct ieee80211_vif *vif)
  702. {
  703. struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  704. ath10k_wmi_tx_beacon_nowait(arvif);
  705. }
/* Walk all active interfaces and submit any pending beacons.
 * Takes ar->data_lock, which ath10k_wmi_tx_beacon_nowait() asserts.
 */
static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
{
	spin_lock_bh(&ar->data_lock);
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   ath10k_wmi_tx_beacons_iter,
						   NULL);
	spin_unlock_bh(&ar->data_lock);
}
/* HTC credit-replenish callback: beacons go out first, then any sender
 * blocked in ath10k_wmi_cmd_send() is woken to retry.
 */
static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
{
	/* try to send pending beacons first. they take priority */
	ath10k_wmi_tx_beacons_nowait(ar);

	wake_up(&ar->wmi.tx_credits_wq);
}
/* Send a WMI command, sleeping up to 3s for HTC tx credits.
 *
 * Takes ownership of @skb: it is freed here on any failure (on success
 * HTC owns it). Returns 0 on success, -EOPNOTSUPP if @cmd_id is
 * WMI_CMD_UNSUPPORTED for the running firmware, or the last error from
 * ath10k_wmi_cmd_send_nowait() (still -EAGAIN if credits never arrived
 * before the timeout).
 */
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
{
	int ret = -EOPNOTSUPP;

	might_sleep();

	if (cmd_id == WMI_CMD_UNSUPPORTED) {
		ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
			    cmd_id);
		return ret;
	}

	/* Retry the non-blocking send until it stops returning -EAGAIN
	 * (out of credits) or the 3s timeout expires.
	 */
	wait_event_timeout(ar->wmi.tx_credits_wq, ({
		/* try to send pending beacons first. they take priority */
		ath10k_wmi_tx_beacons_nowait(ar);

		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
		(ret != -EAGAIN);
	}), 3*HZ);

	if (ret)
		dev_kfree_skb_any(skb);

	return ret;
}
/* Transmit a management frame via the WMI MGMT_TX command.
 *
 * Wraps the 802.11 frame from @skb in a wmi_mgmt_tx_cmd and sends it to
 * the target. On success the frame is reported to mac80211 as ACKed
 * (real tx-status reporting is still a TODO). The command skb is owned
 * by ath10k_wmi_cmd_send(); @skb itself is handed to mac80211 via
 * ieee80211_tx_status_irqsafe() on success. Returns 0 or a negative
 * errno.
 */
int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
{
	int ret = 0;
	struct wmi_mgmt_tx_cmd *cmd;
	struct ieee80211_hdr *hdr;
	struct sk_buff *wmi_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int len;
	u32 buf_len = skb->len;
	u16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
		return -EINVAL;

	len = sizeof(cmd->hdr) + skb->len;

	/* Protected robust management frames need room for the CCMP MIC
	 * the firmware appends after encryption.
	 */
	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		len += IEEE80211_CCMP_MIC_LEN;
		buf_len += IEEE80211_CCMP_MIC_LEN;
	}

	len = round_up(len, 4);

	wmi_skb = ath10k_wmi_alloc_skb(ar, len);
	if (!wmi_skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;

	cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
	cmd->hdr.tx_rate = 0;
	cmd->hdr.tx_power = 0;
	cmd->hdr.buf_len = __cpu_to_le32(buf_len);

	ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
	memcpy(cmd->buf, skb->data, skb->len);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
		   wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
		   fc & IEEE80211_FCTL_STYPE);
	trace_ath10k_wmi_mgmt_tx(ar, skb->data, skb->len);

	/* Send the management frame buffer to the target */
	ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* TODO: report tx status to mac80211 - temporary just ACK */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, skb);

	return ret;
}
  786. static void ath10k_wmi_event_scan_started(struct ath10k *ar)
  787. {
  788. lockdep_assert_held(&ar->data_lock);
  789. switch (ar->scan.state) {
  790. case ATH10K_SCAN_IDLE:
  791. case ATH10K_SCAN_RUNNING:
  792. case ATH10K_SCAN_ABORTING:
  793. ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
  794. ath10k_scan_state_str(ar->scan.state),
  795. ar->scan.state);
  796. break;
  797. case ATH10K_SCAN_STARTING:
  798. ar->scan.state = ATH10K_SCAN_RUNNING;
  799. if (ar->scan.is_roc)
  800. ieee80211_ready_on_channel(ar->hw);
  801. complete(&ar->scan.started);
  802. break;
  803. }
  804. }
/* Handle the firmware's "scan completed" event: finish the scan when it
 * was running/aborting, otherwise just log it (see comment below).
 * Caller must hold ar->data_lock.
 */
static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_STARTING:
		/* One suspected reason scan can be completed while starting is
		 * if firmware fails to deliver all scan events to the host,
		 * e.g. when transport pipe is full. This has been observed
		 * with spectral scan phyerr events starving wmi transport
		 * pipe. In such case the "scan completed" event should be (and
		 * is) ignored by the host as it may be just firmware's scan
		 * state machine recovering.
		 */
		ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
			    ath10k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		__ath10k_scan_finish(ar);
		break;
	}
}
  829. static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
  830. {
  831. lockdep_assert_held(&ar->data_lock);
  832. switch (ar->scan.state) {
  833. case ATH10K_SCAN_IDLE:
  834. case ATH10K_SCAN_STARTING:
  835. ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
  836. ath10k_scan_state_str(ar->scan.state),
  837. ar->scan.state);
  838. break;
  839. case ATH10K_SCAN_RUNNING:
  840. case ATH10K_SCAN_ABORTING:
  841. ar->scan_channel = NULL;
  842. break;
  843. }
  844. }
/* Handle the firmware's "moved to foreign channel" scan event: record
 * the channel we are now on and, for remain-on-channel requests,
 * signal that the requested frequency has been reached.
 * Caller must hold ar->data_lock.
 */
static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_STARTING:
		ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
			    ath10k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);

		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
			complete(&ar->scan.on_channel);
		break;
	}
}
  863. static const char *
  864. ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
  865. enum wmi_scan_completion_reason reason)
  866. {
  867. switch (type) {
  868. case WMI_SCAN_EVENT_STARTED:
  869. return "started";
  870. case WMI_SCAN_EVENT_COMPLETED:
  871. switch (reason) {
  872. case WMI_SCAN_REASON_COMPLETED:
  873. return "completed";
  874. case WMI_SCAN_REASON_CANCELLED:
  875. return "completed [cancelled]";
  876. case WMI_SCAN_REASON_PREEMPTED:
  877. return "completed [preempted]";
  878. case WMI_SCAN_REASON_TIMEDOUT:
  879. return "completed [timedout]";
  880. case WMI_SCAN_REASON_MAX:
  881. break;
  882. }
  883. return "completed [unknown]";
  884. case WMI_SCAN_EVENT_BSS_CHANNEL:
  885. return "bss channel";
  886. case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
  887. return "foreign channel";
  888. case WMI_SCAN_EVENT_DEQUEUED:
  889. return "dequeued";
  890. case WMI_SCAN_EVENT_PREEMPTED:
  891. return "preempted";
  892. case WMI_SCAN_EVENT_START_FAILED:
  893. return "start failed";
  894. default:
  895. return "unknown";
  896. }
  897. }
/* Dispatch a WMI scan event to the scan state machine.
 *
 * Decodes the little-endian event fields, logs them, and routes the
 * event type to the appropriate handler under ar->data_lock. Unknown,
 * DEQUEUED and PREEMPTED events are ignored. Always returns 0.
 */
static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
	enum wmi_scan_event_type event_type;
	enum wmi_scan_completion_reason reason;
	u32 freq;
	u32 req_id;
	u32 scan_id;
	u32 vdev_id;

	event_type = __le32_to_cpu(event->event_type);
	reason = __le32_to_cpu(event->reason);
	freq = __le32_to_cpu(event->channel_freq);
	req_id = __le32_to_cpu(event->scan_req_id);
	scan_id = __le32_to_cpu(event->scan_id);
	vdev_id = __le32_to_cpu(event->vdev_id);

	spin_lock_bh(&ar->data_lock);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
		   ath10k_wmi_event_scan_type_str(event_type, reason),
		   event_type, reason, freq, req_id, scan_id, vdev_id,
		   ath10k_scan_state_str(ar->scan.state), ar->scan.state);

	switch (event_type) {
	case WMI_SCAN_EVENT_STARTED:
		ath10k_wmi_event_scan_started(ar);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath10k_wmi_event_scan_completed(ar);
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath10k_wmi_event_scan_bss_chan(ar);
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
		ath10k_wmi_event_scan_foreign_chan(ar, freq);
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath10k_warn(ar, "received scan start failure event\n");
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
	case WMI_SCAN_EVENT_PREEMPTED:
	default:
		break;
	}

	spin_unlock_bh(&ar->data_lock);
	return 0;
}
  943. static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
  944. {
  945. enum ieee80211_band band;
  946. switch (phy_mode) {
  947. case MODE_11A:
  948. case MODE_11NA_HT20:
  949. case MODE_11NA_HT40:
  950. case MODE_11AC_VHT20:
  951. case MODE_11AC_VHT40:
  952. case MODE_11AC_VHT80:
  953. band = IEEE80211_BAND_5GHZ;
  954. break;
  955. case MODE_11G:
  956. case MODE_11B:
  957. case MODE_11GONLY:
  958. case MODE_11NG_HT20:
  959. case MODE_11NG_HT40:
  960. case MODE_11AC_VHT20_2G:
  961. case MODE_11AC_VHT40_2G:
  962. case MODE_11AC_VHT80_2G:
  963. default:
  964. band = IEEE80211_BAND_2GHZ;
  965. }
  966. return band;
  967. }
  968. static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
  969. {
  970. u8 rate_idx = 0;
  971. /* rate in Kbps */
  972. switch (rate) {
  973. case 1000:
  974. rate_idx = 0;
  975. break;
  976. case 2000:
  977. rate_idx = 1;
  978. break;
  979. case 5500:
  980. rate_idx = 2;
  981. break;
  982. case 11000:
  983. rate_idx = 3;
  984. break;
  985. case 6000:
  986. rate_idx = 4;
  987. break;
  988. case 9000:
  989. rate_idx = 5;
  990. break;
  991. case 12000:
  992. rate_idx = 6;
  993. break;
  994. case 18000:
  995. rate_idx = 7;
  996. break;
  997. case 24000:
  998. rate_idx = 8;
  999. break;
  1000. case 36000:
  1001. rate_idx = 9;
  1002. break;
  1003. case 48000:
  1004. rate_idx = 10;
  1005. break;
  1006. case 54000:
  1007. rate_idx = 11;
  1008. break;
  1009. default:
  1010. break;
  1011. }
  1012. if (band == IEEE80211_BAND_5GHZ) {
  1013. if (rate_idx > 3)
  1014. /* Omit CCK rates */
  1015. rate_idx -= 4;
  1016. else
  1017. rate_idx = 0;
  1018. }
  1019. return rate_idx;
  1020. }
/* Handle a WMI management-frame rx event.
 *
 * Parses the v1 or v2 event header (selected by the EXT_WMI_MGMT_RX
 * firmware feature bit), fills in the mac80211 rx status, strips the
 * WMI header and hands the 802.11 frame to mac80211 via ieee80211_rx().
 *
 * Frames received during CAC or flagged by firmware as decrypt/key-cache
 * failures are dropped. Always returns 0; the skb is consumed on every
 * path (freed on drop, passed to mac80211 otherwise).
 */
static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_mgmt_rx_event_v1 *ev_v1;
	struct wmi_mgmt_rx_event_v2 *ev_v2;
	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	u32 rx_status;
	u32 channel;
	u32 phy_mode;
	u32 snr;
	u32 rate;
	u32 buf_len;
	u16 fc;
	int pull_len;

	/* The v2 event embeds the v1 header, so ev_hdr is valid either way;
	 * only the amount pulled off the skb below differs. */
	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
		ev_hdr = &ev_v2->hdr.v1;
		pull_len = sizeof(*ev_v2);
	} else {
		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
		ev_hdr = &ev_v1->hdr;
		pull_len = sizeof(*ev_v1);
	}

	channel = __le32_to_cpu(ev_hdr->channel);
	buf_len = __le32_to_cpu(ev_hdr->buf_len);
	rx_status = __le32_to_cpu(ev_hdr->status);
	snr = __le32_to_cpu(ev_hdr->snr);
	phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
	rate = __le32_to_cpu(ev_hdr->rate);

	memset(status, 0, sizeof(*status));

	ath10k_dbg(ar, ATH10K_DBG_MGMT,
		   "event mgmt rx status %08x\n", rx_status);

	/* Drop everything while the Channel Availability Check runs. */
	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
		dev_kfree_skb(skb);
		return 0;
	}

	/* CRC/MIC errors are still delivered to mac80211, just flagged. */
	if (rx_status & WMI_RX_STATUS_ERR_CRC)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (rx_status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
	 * MODE_11B. This means phy_mode is not a reliable source for the band
	 * of mgmt rx.
	 */
	if (channel >= 1 && channel <= 14) {
		status->band = IEEE80211_BAND_2GHZ;
	} else if (channel >= 36 && channel <= 165) {
		status->band = IEEE80211_BAND_5GHZ;
	} else {
		/* Shouldn't happen unless list of advertised channels to
		 * mac80211 has been changed.
		 */
		WARN_ON_ONCE(1);
		dev_kfree_skb(skb);
		return 0;
	}

	if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
		ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");

	status->freq = ieee80211_channel_to_frequency(channel, status->band);
	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
	status->rate_idx = get_rate_idx(rate, status->band);

	/* Strip the WMI event header; the 802.11 frame follows. */
	skb_pull(skb, pull_len);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	/* FW delivers WEP Shared Auth frame with Protected Bit set and
	 * encrypted payload. However in case of PMF it delivers decrypted
	 * frames with Protected Bit set. */
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !ieee80211_is_auth(hdr->frame_control)) {
		status->flag |= RX_FLAG_DECRYPTED;

		/* Robust management frames (action/deauth/disassoc under PMF)
		 * keep the protected bit; everything else has its IV/MIC
		 * already stripped by firmware, so clear the bit for
		 * mac80211. */
		if (!ieee80211_is_action(hdr->frame_control) &&
		    !ieee80211_is_deauth(hdr->frame_control) &&
		    !ieee80211_is_disassoc(hdr->frame_control)) {
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
			hdr->frame_control = __cpu_to_le16(fc &
					~IEEE80211_FCTL_PROTECTED);
		}
	}

	ath10k_dbg(ar, ATH10K_DBG_MGMT,
		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath10k_dbg(ar, ATH10K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	/*
	 * packets from HTC come aligned to 4byte boundaries
	 * because they can originally come in along with a trailer
	 */
	skb_trim(skb, buf_len);

	ieee80211_rx(ar->hw, skb);
	return 0;
}
  1125. static int freq_to_idx(struct ath10k *ar, int freq)
  1126. {
  1127. struct ieee80211_supported_band *sband;
  1128. int band, ch, idx = 0;
  1129. for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
  1130. sband = ar->hw->wiphy->bands[band];
  1131. if (!sband)
  1132. continue;
  1133. for (ch = 0; ch < sband->n_channels; ch++, idx++)
  1134. if (sband->channels[ch].center_freq == freq)
  1135. goto exit;
  1136. }
  1137. exit:
  1138. return idx;
  1139. }
/* Handle a WMI channel-info event, updating the survey data for the
 * reported channel.
 *
 * Firmware reports chan info twice per visited channel during a scan;
 * the counters it reports are cumulative, so per-channel values are
 * derived by subtracting the previously saved snapshot. Events arriving
 * outside of a running/aborting scan are ignored with a warning.
 */
static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_chan_info_event *ev;
	struct survey_info *survey;
	u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
	int idx;

	ev = (struct wmi_chan_info_event *)skb->data;

	err_code = __le32_to_cpu(ev->err_code);
	freq = __le32_to_cpu(ev->freq);
	cmd_flags = __le32_to_cpu(ev->cmd_flags);
	noise_floor = __le32_to_cpu(ev->noise_floor);
	rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
	cycle_count = __le32_to_cpu(ev->cycle_count);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
		   err_code, freq, cmd_flags, noise_floor, rx_clear_count,
		   cycle_count);

	spin_lock_bh(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_STARTING:
		ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
		goto exit;
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		break;
	}

	idx = freq_to_idx(ar, freq);
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
			    freq, idx);
		goto exit;
	}

	if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
		/* During scanning chan info is reported twice for each
		 * visited channel. The reported cycle count is global
		 * and per-channel cycle count must be calculated */
		cycle_count -= ar->survey_last_cycle_count;
		rx_clear_count -= ar->survey_last_rx_clear_count;

		survey = &ar->survey[idx];
		survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count);
		survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
		survey->noise = noise_floor;
		survey->filled = SURVEY_INFO_CHANNEL_TIME |
				 SURVEY_INFO_CHANNEL_TIME_RX |
				 SURVEY_INFO_NOISE_DBM;
	}

	/* Snapshot the cumulative counters for the next delta computation. */
	ar->survey_last_rx_clear_count = rx_clear_count;
	ar->survey_last_cycle_count = cycle_count;

exit:
	spin_unlock_bh(&ar->data_lock);
}
/* Firmware's reply to a WMI echo command; nothing to do beyond logging. */
static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
}
/* Forward a firmware debug-log message to the ath10k tracing
 * infrastructure. Always returns 0.
 */
static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
		   skb->len);

	trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);

	return 0;
}
/* Copy a wire-format (little-endian) pdev stats structure into the
 * CPU-order in-memory representation used by debugfs. Pure field-by-field
 * conversion; no validation is performed.
 */
static void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src,
				       struct ath10k_fw_stats_pdev *dst)
{
	const struct wal_dbg_tx_stats *tx = &src->wal.tx;
	const struct wal_dbg_rx_stats *rx = &src->wal.rx;

	/* general pdev counters */
	dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
	dst->cycle_count = __le32_to_cpu(src->cycle_count);
	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);

	/* WAL tx path debug counters */
	dst->comp_queued = __le32_to_cpu(tx->comp_queued);
	dst->comp_delivered = __le32_to_cpu(tx->comp_delivered);
	dst->msdu_enqued = __le32_to_cpu(tx->msdu_enqued);
	dst->mpdu_enqued = __le32_to_cpu(tx->mpdu_enqued);
	dst->wmm_drop = __le32_to_cpu(tx->wmm_drop);
	dst->local_enqued = __le32_to_cpu(tx->local_enqued);
	dst->local_freed = __le32_to_cpu(tx->local_freed);
	dst->hw_queued = __le32_to_cpu(tx->hw_queued);
	dst->hw_reaped = __le32_to_cpu(tx->hw_reaped);
	dst->underrun = __le32_to_cpu(tx->underrun);
	dst->tx_abort = __le32_to_cpu(tx->tx_abort);
	dst->mpdus_requed = __le32_to_cpu(tx->mpdus_requed);
	dst->tx_ko = __le32_to_cpu(tx->tx_ko);
	dst->data_rc = __le32_to_cpu(tx->data_rc);
	dst->self_triggers = __le32_to_cpu(tx->self_triggers);
	dst->sw_retry_failure = __le32_to_cpu(tx->sw_retry_failure);
	dst->illgl_rate_phy_err = __le32_to_cpu(tx->illgl_rate_phy_err);
	dst->pdev_cont_xretry = __le32_to_cpu(tx->pdev_cont_xretry);
	dst->pdev_tx_timeout = __le32_to_cpu(tx->pdev_tx_timeout);
	dst->pdev_resets = __le32_to_cpu(tx->pdev_resets);
	dst->phy_underrun = __le32_to_cpu(tx->phy_underrun);
	dst->txop_ovf = __le32_to_cpu(tx->txop_ovf);

	/* WAL rx path debug counters */
	dst->mid_ppdu_route_change = __le32_to_cpu(rx->mid_ppdu_route_change);
	dst->status_rcvd = __le32_to_cpu(rx->status_rcvd);
	dst->r0_frags = __le32_to_cpu(rx->r0_frags);
	dst->r1_frags = __le32_to_cpu(rx->r1_frags);
	dst->r2_frags = __le32_to_cpu(rx->r2_frags);
	dst->r3_frags = __le32_to_cpu(rx->r3_frags);
	dst->htt_msdus = __le32_to_cpu(rx->htt_msdus);
	dst->htt_mpdus = __le32_to_cpu(rx->htt_mpdus);
	dst->loc_msdus = __le32_to_cpu(rx->loc_msdus);
	dst->loc_mpdus = __le32_to_cpu(rx->loc_mpdus);
	dst->oversize_amsdu = __le32_to_cpu(rx->oversize_amsdu);
	dst->phy_errs = __le32_to_cpu(rx->phy_errs);
	dst->phy_err_drop = __le32_to_cpu(rx->phy_err_drop);
	dst->mpdu_errs = __le32_to_cpu(rx->mpdu_errs);
}
/* Copy a wire-format (little-endian) peer stats structure into the
 * CPU-order in-memory representation.
 */
static void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
				       struct ath10k_fw_stats_peer *dst)
{
	ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
}
/* Parse a main-firmware WMI stats event into @stats.
 *
 * Consumes the event header and then the advertised number of pdev and
 * peer records from the skb, appending converted copies to the lists in
 * @stats. Allocation is GFP_ATOMIC since this runs in the WMI event
 * path; a failed allocation silently skips that record.
 *
 * Returns 0 on success or -EPROTO if the skb is shorter than the
 * record counts claim.
 */
static int ath10k_wmi_main_pull_fw_stats(struct ath10k *ar,
					 struct sk_buff *skb,
					 struct ath10k_fw_stats *stats)
{
	const struct wmi_stats_event *ev = (void *)skb->data;
	u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
	int i;

	if (!skb_pull(skb, sizeof(*ev)))
		return -EPROTO;

	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);

	for (i = 0; i < num_pdev_stats; i++) {
		const struct wmi_pdev_stats *src;
		struct ath10k_fw_stats_pdev *dst;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_pdev_stats(src, dst);
		list_add_tail(&dst->list, &stats->pdevs);
	}

	/* fw doesn't implement vdev stats */

	for (i = 0; i < num_peer_stats; i++) {
		const struct wmi_peer_stats *src;
		struct ath10k_fw_stats_peer *dst;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_peer_stats(src, dst);
		list_add_tail(&dst->list, &stats->peers);
	}

	return 0;
}
/* Parse a 10.x-firmware WMI stats event into @stats.
 *
 * Same structure as ath10k_wmi_main_pull_fw_stats() but the 10.x record
 * layouts embed the legacy structures (src->old) plus extra fields
 * (rts/fcs/beacon counters for pdev, rx rate for peer).
 *
 * Returns 0 on success or -EPROTO if the skb is shorter than the
 * record counts claim.
 */
static int ath10k_wmi_10x_pull_fw_stats(struct ath10k *ar,
					struct sk_buff *skb,
					struct ath10k_fw_stats *stats)
{
	const struct wmi_stats_event *ev = (void *)skb->data;
	u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
	int i;

	if (!skb_pull(skb, sizeof(*ev)))
		return -EPROTO;

	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);

	for (i = 0; i < num_pdev_stats; i++) {
		const struct wmi_10x_pdev_stats *src;
		struct ath10k_fw_stats_pdev *dst;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		/* common part shared with the main firmware layout */
		ath10k_wmi_pull_pdev_stats(&src->old, dst);

		/* 10.x-only extension fields */
		dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
		dst->rts_bad = __le32_to_cpu(src->rts_bad);
		dst->rts_good = __le32_to_cpu(src->rts_good);
		dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
		dst->no_beacons = __le32_to_cpu(src->no_beacons);
		dst->mib_int_count = __le32_to_cpu(src->mib_int_count);

		list_add_tail(&dst->list, &stats->pdevs);
	}

	/* fw doesn't implement vdev stats */

	for (i = 0; i < num_peer_stats; i++) {
		const struct wmi_10x_peer_stats *src;
		struct ath10k_fw_stats_peer *dst;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_peer_stats(&src->old, dst);

		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);

		list_add_tail(&dst->list, &stats->peers);
	}

	return 0;
}
  1344. int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
  1345. struct ath10k_fw_stats *stats)
  1346. {
  1347. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
  1348. return ath10k_wmi_10x_pull_fw_stats(ar, skb, stats);
  1349. else
  1350. return ath10k_wmi_main_pull_fw_stats(ar, skb, stats);
  1351. }
/* Handle a WMI stats update event by handing the skb to the debugfs
 * fw-stats processing code.
 */
static void ath10k_wmi_event_update_stats(struct ath10k *ar,
					  struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");

	ath10k_debug_fw_stats_process(ar, skb);
}
/* Handle the firmware's response to a vdev start request.
 *
 * Completes vdev_setup_done so the waiting vdev-start path can proceed.
 * A non-zero status triggers a WARN and the completion is deliberately
 * NOT signalled, leaving the waiter to time out.
 */
static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
					     struct sk_buff *skb)
{
	struct wmi_vdev_start_response_event *ev;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");

	ev = (struct wmi_vdev_start_response_event *)skb->data;

	if (WARN_ON(__le32_to_cpu(ev->status)))
		return;

	complete(&ar->vdev_setup_done);
}
/* Handle the vdev stopped event: wake up whoever is waiting on
 * vdev_setup_done for the stop to finish.
 */
static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
					  struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
	complete(&ar->vdev_setup_done);
}
/* Handle a firmware STA kickout event.
 *
 * Looks up the station by MAC under RCU and reports a low-ack condition
 * (with a nominal count of 10 failed frames) to mac80211, which will
 * typically disconnect the peer. An unknown MAC only produces a
 * warning.
 */
static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
					      struct sk_buff *skb)
{
	struct wmi_peer_sta_kickout_event *ev;
	struct ieee80211_sta *sta;

	ev = (struct wmi_peer_sta_kickout_event *)skb->data;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
		   ev->peer_macaddr.addr);

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL);
	if (!sta) {
		ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
			    ev->peer_macaddr.addr);
		goto exit;
	}

	ieee80211_report_low_ack(sta, 10);

exit:
	rcu_read_unlock();
}
  1393. /*
  1394. * FIXME
  1395. *
  1396. * We don't report to mac80211 sleep state of connected
  1397. * stations. Due to this mac80211 can't fill in TIM IE
  1398. * correctly.
  1399. *
  1400. * I know of no way of getting nullfunc frames that contain
  1401. * sleep transition from connected stations - these do not
  1402. * seem to be sent from the target to the host. There also
  1403. * doesn't seem to be a dedicated event for that. So the
  1404. * only way left to do this would be to read tim_bitmap
  1405. * during SWBA.
  1406. *
  1407. * We could probably try using tim_bitmap from SWBA to tell
  1408. * mac80211 which stations are asleep and which are not. The
  1409. * problem here is calling mac80211 functions so many times
  1410. * could take too long and make us miss the time to submit
  1411. * the beacon to the target.
  1412. *
  1413. * So as a workaround we try to extend the TIM IE if there
  1414. * is unicast buffered for stations with aid > 7 and fill it
  1415. * in ourselves.
  1416. */
/* Rewrite the TIM IE of a beacon using the tim_bitmap reported by
 * firmware in the SWBA event.
 *
 * The firmware bitmap is cached in arvif (it is only valid when
 * tim_changed is set), the beacon's existing TIM IE is located, expanded
 * in place if the partial virtual bitmap is too small, and its bitmap
 * control and virtual map are overwritten. DTIM bookkeeping flags for
 * the tx path are set when dtim_count hits zero.
 */
static void ath10k_wmi_update_tim(struct ath10k *ar,
				  struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
	struct ieee80211_tim_ie *tim;
	u8 *ies, *ie;
	u8 ie_len, pvm_len;
	__le32 t;
	u32 v;

	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
	 * we must copy the bitmap upon change and reuse it later */
	if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
		int i;

		BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
			     sizeof(bcn_info->tim_info.tim_bitmap));

		/* unpack the __le32 word array into a byte array */
		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
			t = bcn_info->tim_info.tim_bitmap[i / 4];
			v = __le32_to_cpu(t);
			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
		}

		/* FW reports either length 0 or 16
		 * so we calculate this on our own */
		arvif->u.ap.tim_len = 0;
		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
			if (arvif->u.ap.tim_bitmap[i])
				arvif->u.ap.tim_len = i;

		arvif->u.ap.tim_len++;
	}

	/* skip 802.11 header and the fixed beacon parameters to reach
	 * the first IE */
	ies = bcn->data;
	ies += ieee80211_hdrlen(hdr->frame_control);
	ies += 12; /* fixed parameters */

	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
				    (u8 *)skb_tail_pointer(bcn) - ies);
	if (!ie) {
		/* IBSS beacons legitimately carry no TIM IE */
		if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
			ath10k_warn(ar, "no tim ie found;\n");
		return;
	}

	tim = (void *)ie + 2;
	ie_len = ie[1];
	/* NOTE(review): u8 arithmetic - assumes a well-formed TIM IE with
	 * ie_len >= 3, otherwise pvm_len wraps; mac80211-generated beacons
	 * presumably guarantee this - TODO confirm */
	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */

	if (pvm_len < arvif->u.ap.tim_len) {
		/* grow the IE in place and shift any following IEs back */
		int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
		void *next_ie = ie + 2 + ie_len;

		if (skb_put(bcn, expand_size)) {
			memmove(next_ie + expand_size, next_ie, move_size);

			ie[1] += expand_size;
			ie_len += expand_size;
			pvm_len += expand_size;
		} else {
			ath10k_warn(ar, "tim expansion failed\n");
		}
	}

	if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
		ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
		return;
	}

	tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);

	if (tim->dtim_count == 0) {
		ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;

		if (__le32_to_cpu(bcn_info->tim_info.tim_mcast) == 1)
			ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
	}

	ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
		   tim->dtim_count, tim->dtim_period,
		   tim->bitmap_ctrl, pvm_len);
}
/* Serialize a firmware P2P NoA report into a WFA P2P vendor IE carrying
 * a Notice-of-Absence attribute.
 *
 * @data: destination buffer, must be at least @len bytes
 * @len:  total IE length as computed by ath10k_p2p_calc_noa_ie_len()
 * @noa:  NoA info from the SWBA beacon event
 */
static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
				   struct wmi_p2p_noa_info *noa)
{
	struct ieee80211_p2p_noa_attr *noa_attr;
	u8 ctwindow_oppps = noa->ctwindow_oppps;
	u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
	bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
	__le16 *noa_attr_len;
	u16 attr_len;
	u8 noa_descriptors = noa->num_descriptors;
	int i;

	/* P2P IE */
	data[0] = WLAN_EID_VENDOR_SPECIFIC;
	data[1] = len - 2;			/* IE length excludes EID+len */
	data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
	data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
	data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
	data[5] = WLAN_OUI_TYPE_WFA_P2P;

	/* NOA ATTR */
	data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
	noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
	noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];

	noa_attr->index = noa->index;
	noa_attr->oppps_ctwindow = ctwindow;
	if (oppps)
		noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;

	/* count is converted to CPU order; duration/interval/start_time
	 * are copied through as-is from the firmware structure */
	for (i = 0; i < noa_descriptors; i++) {
		noa_attr->desc[i].count =
			__le32_to_cpu(noa->descriptors[i].type_count);
		noa_attr->desc[i].duration = noa->descriptors[i].duration;
		noa_attr->desc[i].interval = noa->descriptors[i].interval;
		noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
	}

	attr_len = 2; /* index + oppps_ctwindow */
	attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
	*noa_attr_len = __cpu_to_le16(attr_len);
}
  1525. static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
  1526. {
  1527. u32 len = 0;
  1528. u8 noa_descriptors = noa->num_descriptors;
  1529. u8 opp_ps_info = noa->ctwindow_oppps;
  1530. bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
  1531. if (!noa_descriptors && !opps_enabled)
  1532. return len;
  1533. len += 1 + 1 + 4; /* EID + len + OUI */
  1534. len += 1 + 2; /* noa attr + attr len */
  1535. len += 1 + 1; /* index + oppps_ctwindow */
  1536. len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
  1537. return len;
  1538. }
/* Refresh the cached P2P NoA IE for a P2P-GO vdev from a SWBA event and
 * append it to the outgoing beacon.
 *
 * When firmware flags the NoA as changed, a new IE buffer is built and
 * swapped in under data_lock; the previous buffer is freed afterwards.
 * A zero-length IE (or allocation failure) clears the cached data. The
 * current IE, if any, is then appended to @bcn.
 */
static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
	u8 *new_data, *old_data = arvif->u.ap.noa_data;
	u32 new_len;

	/* NoA only applies to P2P GO interfaces */
	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
		return;

	ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
		new_len = ath10k_p2p_calc_noa_ie_len(noa);
		if (!new_len)
			goto cleanup;

		new_data = kmalloc(new_len, GFP_ATOMIC);
		if (!new_data)
			goto cleanup;

		ath10k_p2p_fill_noa_ie(new_data, new_len, noa);

		/* publish the new IE atomically, then free the old buffer
		 * outside the lock */
		spin_lock_bh(&ar->data_lock);
		arvif->u.ap.noa_data = new_data;
		arvif->u.ap.noa_len = new_len;
		spin_unlock_bh(&ar->data_lock);
		kfree(old_data);
	}

	if (arvif->u.ap.noa_data)
		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
			memcpy(skb_put(bcn, arvif->u.ap.noa_len),
			       arvif->u.ap.noa_data,
			       arvif->u.ap.noa_len);
	return;

cleanup:
	spin_lock_bh(&ar->data_lock);
	arvif->u.ap.noa_data = NULL;
	arvif->u.ap.noa_len = 0;
	spin_unlock_bh(&ar->data_lock);
	kfree(old_data);
}
  1576. static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
  1577. {
  1578. struct wmi_host_swba_event *ev;
  1579. u32 map;
  1580. int i = -1;
  1581. struct wmi_bcn_info *bcn_info;
  1582. struct ath10k_vif *arvif;
  1583. struct sk_buff *bcn;
  1584. dma_addr_t paddr;
  1585. int ret, vdev_id = 0;
  1586. ev = (struct wmi_host_swba_event *)skb->data;
  1587. map = __le32_to_cpu(ev->vdev_map);
  1588. ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
  1589. ev->vdev_map);
  1590. for (; map; map >>= 1, vdev_id++) {
  1591. if (!(map & 0x1))
  1592. continue;
  1593. i++;
  1594. if (i >= WMI_MAX_AP_VDEV) {
  1595. ath10k_warn(ar, "swba has corrupted vdev map\n");
  1596. break;
  1597. }
  1598. bcn_info = &ev->bcn_info[i];
  1599. ath10k_dbg(ar, ATH10K_DBG_MGMT,
  1600. "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
  1601. i,
  1602. __le32_to_cpu(bcn_info->tim_info.tim_len),
  1603. __le32_to_cpu(bcn_info->tim_info.tim_mcast),
  1604. __le32_to_cpu(bcn_info->tim_info.tim_changed),
  1605. __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
  1606. __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
  1607. __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
  1608. __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
  1609. __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));
  1610. arvif = ath10k_get_arvif(ar, vdev_id);
  1611. if (arvif == NULL) {
  1612. ath10k_warn(ar, "no vif for vdev_id %d found\n",
  1613. vdev_id);
  1614. continue;
  1615. }
  1616. /* There are no completions for beacons so wait for next SWBA
  1617. * before telling mac80211 to decrement CSA counter
  1618. *
  1619. * Once CSA counter is completed stop sending beacons until
  1620. * actual channel switch is done */
  1621. if (arvif->vif->csa_active &&
  1622. ieee80211_csa_is_complete(arvif->vif)) {
  1623. ieee80211_csa_finish(arvif->vif);
  1624. continue;
  1625. }
  1626. bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
  1627. if (!bcn) {
  1628. ath10k_warn(ar, "could not get mac80211 beacon\n");
  1629. continue;
  1630. }
  1631. ath10k_tx_h_seq_no(arvif->vif, bcn);
  1632. ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
  1633. ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
  1634. spin_lock_bh(&ar->data_lock);
  1635. if (arvif->beacon) {
  1636. if (!arvif->beacon_sent)
  1637. ath10k_warn(ar, "SWBA overrun on vdev %d\n",
  1638. arvif->vdev_id);
  1639. ath10k_mac_vif_beacon_free(arvif);
  1640. }
  1641. if (!arvif->beacon_buf) {
  1642. paddr = dma_map_single(arvif->ar->dev, bcn->data,
  1643. bcn->len, DMA_TO_DEVICE);
  1644. ret = dma_mapping_error(arvif->ar->dev, paddr);
  1645. if (ret) {
  1646. ath10k_warn(ar, "failed to map beacon: %d\n",
  1647. ret);
  1648. dev_kfree_skb_any(bcn);
  1649. goto skip;
  1650. }
  1651. ATH10K_SKB_CB(bcn)->paddr = paddr;
  1652. } else {
  1653. if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
  1654. ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
  1655. bcn->len, IEEE80211_MAX_FRAME_LEN);
  1656. skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
  1657. }
  1658. memcpy(arvif->beacon_buf, bcn->data, bcn->len);
  1659. ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
  1660. }
  1661. arvif->beacon = bcn;
  1662. arvif->beacon_sent = false;
  1663. trace_ath10k_wmi_bcn_tx(ar, bcn->data, bcn->len);
  1664. ath10k_wmi_tx_beacon_nowait(arvif);
  1665. skip:
  1666. spin_unlock_bh(&ar->data_lock);
  1667. }
  1668. }
/* TBTT offset update event; currently only logged, no action taken. */
static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
}
/* Feed a firmware radar pulse report into the DFS pattern detector and,
 * if a full radar pattern is recognized, notify mac80211.
 *
 * @phyerr: the enclosing phy-error event (provides rssi and 32-bit tsf)
 * @rr:     the radar report TLV payload
 * @tsf:    full 64-bit tsf; its low 32 bits are replaced by the
 *          per-event timestamp from @phyerr
 */
static void ath10k_dfs_radar_report(struct ath10k *ar,
				    const struct wmi_phyerr *phyerr,
				    const struct phyerr_radar_report *rr,
				    u64 tsf)
{
	u32 reg0, reg1, tsf32l;
	struct pulse_event pe;
	u64 tsf64;
	u8 rssi, width;

	reg0 = __le32_to_cpu(rr->reg0);
	reg1 = __le32_to_cpu(rr->reg1);

	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
		   MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
		   MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
		   MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));

	if (!ar->dfs_detector)
		return;

	/* report event to DFS pattern detector */
	/* combine the event's 32-bit timestamp with the high half of the
	 * 64-bit tsf */
	tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
	tsf64 = tsf & (~0xFFFFFFFFULL);
	tsf64 |= tsf32l;

	width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
	rssi = phyerr->rssi_combined;

	/* hardware store this as 8 bit signed value,
	 * set to zero if negative number
	 */
	if (rssi & 0x80)
		rssi = 0;

	pe.ts = tsf64;
	pe.freq = ar->hw->conf.chandef.chan->center_freq;
	pe.width = width;
	pe.rssi = rssi;

	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
		   pe.freq, pe.width, pe.rssi, pe.ts);

	ATH10K_DFS_STAT_INC(ar, pulses_detected);

	if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
			   "dfs no pulse pattern detected, yet\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
	ATH10K_DFS_STAT_INC(ar, radar_detected);

	/* Control radar events reporting in debugfs file
	   dfs_block_radar_events */
	if (ar->dfs_block_radar_events) {
		ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
		return;
	}

	ieee80211_radar_detected(ar->hw);
}
/* Inspect a search-FFT report associated with a radar phy error and
 * decide whether the pulse looks like a false detection.
 *
 * Returns 0 when the pulse is plausible, or -EINVAL when the
 * rssi/peak-magnitude combination indicates a likely false event (the
 * caller then discards the whole phy-error).
 */
static int ath10k_dfs_fft_report(struct ath10k *ar,
				 const struct wmi_phyerr *phyerr,
				 const struct phyerr_fft_report *fftr,
				 u64 tsf)
{
	u32 reg0, reg1;
	u8 rssi, peak_mag;

	reg0 = __le32_to_cpu(fftr->reg0);
	reg1 = __le32_to_cpu(fftr->reg1);
	rssi = phyerr->rssi_combined;

	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
		   MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
		   MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
		   MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
		   MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
		   MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
		   MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
		   MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
		   MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));

	peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);

	/* false event detection */
	if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
	    peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
		ATH10K_DFS_STAT_INC(ar, pulses_discarded);
		return -EINVAL;
	}

	return 0;
}
/* Process a DFS phy-error event by walking its TLV buffer.
 *
 * Radar pulse summaries are forwarded to the pattern detector; FFT
 * reports are used for false-pulse screening - a report judged false
 * aborts processing of the remaining TLVs. Each TLV's declared payload
 * is bounds-checked against buf_len before being dereferenced.
 */
static void ath10k_wmi_event_dfs(struct ath10k *ar,
				 const struct wmi_phyerr *phyerr,
				 u64 tsf)
{
	int buf_len, tlv_len, res, i = 0;
	const struct phyerr_tlv *tlv;
	const struct phyerr_radar_report *rr;
	const struct phyerr_fft_report *fftr;
	const u8 *tlv_buf;

	buf_len = __le32_to_cpu(phyerr->buf_len);
	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
		   phyerr->phy_err_code, phyerr->rssi_combined,
		   __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len);

	/* Skip event if DFS disabled */
	if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
		return;

	ATH10K_DFS_STAT_INC(ar, pulses_total);

	while (i < buf_len) {
		/* header itself must fit before reading len/tag */
		if (i + sizeof(*tlv) > buf_len) {
			ath10k_warn(ar, "too short buf for tlv header (%d)\n",
				    i);
			return;
		}

		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
		tlv_len = __le16_to_cpu(tlv->len);
		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
			   "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
			   tlv_len, tlv->tag, tlv->sig);

		switch (tlv->tag) {
		case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
			if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
				ath10k_warn(ar, "too short radar pulse summary (%d)\n",
					    i);
				return;
			}

			rr = (struct phyerr_radar_report *)tlv_buf;
			ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
			break;
		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
			if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
				ath10k_warn(ar, "too short fft report (%d)\n",
					    i);
				return;
			}

			fftr = (struct phyerr_fft_report *)tlv_buf;
			res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
			if (res)
				return;
			break;
		}

		i += sizeof(*tlv) + tlv_len;
	}
}
/* Parse a spectral scan PHY error payload: walk the TLV stream in
 * phyerr->buf and forward each search FFT report (fixed header plus
 * trailing FFT bin bytes) to the spectral module. Parsing aborts with a
 * warning on any truncated TLV or on a processing error.
 */
static void
ath10k_wmi_event_spectral_scan(struct ath10k *ar,
			       const struct wmi_phyerr *phyerr,
			       u64 tsf)
{
	int buf_len, tlv_len, res, i = 0;
	struct phyerr_tlv *tlv;
	const void *tlv_buf;
	const struct phyerr_fft_report *fftr;
	size_t fftr_len;

	buf_len = __le32_to_cpu(phyerr->buf_len);

	while (i < buf_len) {
		/* need a complete TLV header before dereferencing it */
		if (i + sizeof(*tlv) > buf_len) {
			ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
				    i);
			return;
		}

		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
		tlv_len = __le16_to_cpu(tlv->len);
		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];

		/* the advertised payload must fit in the buffer too */
		if (i + sizeof(*tlv) + tlv_len > buf_len) {
			ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
				    i);
			return;
		}

		switch (tlv->tag) {
		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
			if (sizeof(*fftr) > tlv_len) {
				ath10k_warn(ar, "failed to parse fft report at byte %d\n",
					    i);
				return;
			}

			/* bytes past the fixed header are the FFT bins */
			fftr_len = tlv_len - sizeof(*fftr);
			fftr = tlv_buf;
			res = ath10k_spectral_process_fft(ar, phyerr,
							  fftr, fftr_len,
							  tsf);
			if (res < 0) {
				ath10k_warn(ar, "failed to process fft report: %d\n",
					    res);
				return;
			}
			break;
		}

		i += sizeof(*tlv) + tlv_len;
	}
}
/* WMI_PHYERR_EVENTID handler: one event carries a batch of PHY error
 * reports sharing a 64-bit TSF. Validate remaining length while walking
 * the batch and dispatch each report to the DFS and/or spectral scan
 * parsers based on its error code. Malformed batches abort with a warning.
 */
static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
{
	const struct wmi_phyerr_event *ev;
	const struct wmi_phyerr *phyerr;
	u32 count, i, buf_len, phy_err_code;
	u64 tsf;
	int left_len = skb->len;

	ATH10K_DFS_STAT_INC(ar, phy_errors);

	/* Check if combined event available */
	if (left_len < sizeof(*ev)) {
		ath10k_warn(ar, "wmi phyerr combined event wrong len\n");
		return;
	}

	left_len -= sizeof(*ev);

	/* Check number of included events */
	ev = (const struct wmi_phyerr_event *)skb->data;
	count = __le32_to_cpu(ev->num_phyerrs);

	/* 64-bit TSF is delivered as two little-endian 32-bit halves */
	tsf = __le32_to_cpu(ev->tsf_u32);
	tsf <<= 32;
	tsf |= __le32_to_cpu(ev->tsf_l32);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi event phyerr count %d tsf64 0x%llX\n",
		   count, tsf);

	phyerr = ev->phyerrs;
	for (i = 0; i < count; i++) {
		/* Check if we can read event header */
		if (left_len < sizeof(*phyerr)) {
			ath10k_warn(ar, "single event (%d) wrong head len\n",
				    i);
			return;
		}

		left_len -= sizeof(*phyerr);

		buf_len = __le32_to_cpu(phyerr->buf_len);
		phy_err_code = phyerr->phy_err_code;

		/* the per-report payload must also fit */
		if (left_len < buf_len) {
			ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
			return;
		}

		left_len -= buf_len;

		switch (phy_err_code) {
		case PHY_ERROR_RADAR:
			ath10k_wmi_event_dfs(ar, phyerr, tsf);
			break;
		case PHY_ERROR_SPECTRAL_SCAN:
			ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
			break;
		case PHY_ERROR_FALSE_RADAR_EXT:
			/* could be either - hand to both parsers */
			ath10k_wmi_event_dfs(ar, phyerr, tsf);
			ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
			break;
		default:
			break;
		}

		/* advance past this report's header and payload */
		phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len;
	}
}
/* WMI_ROAM_EVENTID handler - currently only logged, not acted upon. */
static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
}
/* WMI_PROFILE_MATCH handler - currently only logged, not acted upon. */
static void ath10k_wmi_event_profile_match(struct ath10k *ar,
					   struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
}
  1937. static void ath10k_wmi_event_debug_print(struct ath10k *ar,
  1938. struct sk_buff *skb)
  1939. {
  1940. char buf[101], c;
  1941. int i;
  1942. for (i = 0; i < sizeof(buf) - 1; i++) {
  1943. if (i >= skb->len)
  1944. break;
  1945. c = skb->data[i];
  1946. if (c == '\0')
  1947. break;
  1948. if (isascii(c) && isprint(c))
  1949. buf[i] = c;
  1950. else
  1951. buf[i] = '.';
  1952. }
  1953. if (i == sizeof(buf) - 1)
  1954. ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
  1955. /* for some reason the debug prints end with \n, remove that */
  1956. if (skb->data[i - 1] == '\n')
  1957. i--;
  1958. /* the last byte is always reserved for the null character */
  1959. buf[i] = '\0';
  1960. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf);
  1961. }
/* WMI_PDEV_QVIT_EVENTID handler - currently only logged, not acted upon. */
static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
}
/* WMI_WLAN_PROFILE_DATA_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
}
/* WMI_RTT_MEASUREMENT_REPORT_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
						    struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
}
/* WMI_TSF_MEASUREMENT_REPORT_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
						    struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
}
/* WMI_RTT_ERROR_REPORT_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
}
/* WMI_WOW_WAKEUP_HOST_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
}
/* WMI_DCS_INTERFERENCE_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
}
/* WMI_PDEV_TPC_CONFIG_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
}
/* WMI_PDEV_FTM_INTG_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
					   struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
}
/* WMI_GTK_OFFLOAD_STATUS_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
						struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
}
/* WMI_GTK_REKEY_FAIL_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
}
/* WMI_TX_DELBA_COMPLETE_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
}
/* WMI_TX_ADDBA_COMPLETE_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
}
/* WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
						       struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
}
/* WMI_INST_RSSI_STATS_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
}
/* WMI_VDEV_STANDBY_REQ_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
}
/* WMI_VDEV_RESUME_REQ_EVENTID handler - currently only logged. */
static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
}
  2046. static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
  2047. u32 num_units, u32 unit_len)
  2048. {
  2049. dma_addr_t paddr;
  2050. u32 pool_size;
  2051. int idx = ar->wmi.num_mem_chunks;
  2052. pool_size = num_units * round_up(unit_len, 4);
  2053. if (!pool_size)
  2054. return -EINVAL;
  2055. ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
  2056. pool_size,
  2057. &paddr,
  2058. GFP_ATOMIC);
  2059. if (!ar->wmi.mem_chunks[idx].vaddr) {
  2060. ath10k_warn(ar, "failed to allocate memory chunk\n");
  2061. return -ENOMEM;
  2062. }
  2063. memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
  2064. ar->wmi.mem_chunks[idx].paddr = paddr;
  2065. ar->wmi.mem_chunks[idx].len = pool_size;
  2066. ar->wmi.mem_chunks[idx].req_id = req_id;
  2067. ar->wmi.num_mem_chunks++;
  2068. return 0;
  2069. }
/* Parse the main-branch WMI service ready event into @arg. Fields are
 * stored still little-endian; the caller converts as needed. The skb is
 * advanced past the fixed event header.
 *
 * Returns 0 on success, -EPROTO when the event is shorter than it claims.
 */
static int ath10k_wmi_main_pull_svc_rdy_ev(struct sk_buff *skb,
					   struct wmi_svc_rdy_ev_arg *arg)
{
	struct wmi_service_ready_event *ev;
	size_t i, n;

	if (skb->len < sizeof(*ev))
		return -EPROTO;

	ev = (void *)skb->data;
	skb_pull(skb, sizeof(*ev));
	arg->min_tx_power = ev->hw_min_tx_power;
	arg->max_tx_power = ev->hw_max_tx_power;
	arg->ht_cap = ev->ht_cap_info;
	arg->vht_cap = ev->vht_cap_info;
	arg->sw_ver0 = ev->sw_version;
	arg->sw_ver1 = ev->sw_version_1;
	arg->phy_capab = ev->phy_capability;
	arg->num_rf_chains = ev->num_rf_chains;
	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
	arg->num_mem_reqs = ev->num_mem_reqs;
	arg->service_map = ev->wmi_service_bitmap;

	/* clamp to the arg array size; firmware may advertise more */
	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
		  ARRAY_SIZE(arg->mem_reqs));
	for (i = 0; i < n; i++)
		arg->mem_reqs[i] = &ev->mem_reqs[i];

	/* NOTE(review): this checks against sizeof(arg->mem_reqs[0]),
	 * i.e. the size of a host pointer, not the on-wire mem_req
	 * struct - confirm the two sizes agree for this check to be
	 * meaningful
	 */
	if (skb->len <
	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
		return -EPROTO;

	return 0;
}
  2099. static int ath10k_wmi_10x_pull_svc_rdy_ev(struct sk_buff *skb,
  2100. struct wmi_svc_rdy_ev_arg *arg)
  2101. {
  2102. struct wmi_10x_service_ready_event *ev;
  2103. int i, n;
  2104. if (skb->len < sizeof(*ev))
  2105. return -EPROTO;
  2106. ev = (void *)skb->data;
  2107. skb_pull(skb, sizeof(*ev));
  2108. arg->min_tx_power = ev->hw_min_tx_power;
  2109. arg->max_tx_power = ev->hw_max_tx_power;
  2110. arg->ht_cap = ev->ht_cap_info;
  2111. arg->vht_cap = ev->vht_cap_info;
  2112. arg->sw_ver0 = ev->sw_version;
  2113. arg->phy_capab = ev->phy_capability;
  2114. arg->num_rf_chains = ev->num_rf_chains;
  2115. arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
  2116. arg->num_mem_reqs = ev->num_mem_reqs;
  2117. arg->service_map = ev->wmi_service_bitmap;
  2118. n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
  2119. ARRAY_SIZE(arg->mem_reqs));
  2120. for (i = 0; i < n; i++)
  2121. arg->mem_reqs[i] = &ev->mem_reqs[i];
  2122. if (skb->len <
  2123. __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
  2124. return -EPROTO;
  2125. return 0;
  2126. }
/* WMI_SERVICE_READY_EVENTID handler: parse firmware capabilities (tx power
 * limits, HT/VHT caps, version numbers, RF chain count, service bitmap),
 * honor the firmware's host memory requests, and complete the
 * service_ready completion the boot path waits on.
 */
static void ath10k_wmi_event_service_ready(struct ath10k *ar,
					   struct sk_buff *skb)
{
	struct wmi_svc_rdy_ev_arg arg = {};
	u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
	DECLARE_BITMAP(svc_bmap, WMI_SERVICE_MAX) = {};
	int ret;

	/* 10.x and main firmware use different event layouts and service
	 * bitmap encodings
	 */
	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
		ret = ath10k_wmi_10x_pull_svc_rdy_ev(skb, &arg);
		wmi_10x_svc_map(arg.service_map, svc_bmap);
	} else {
		ret = ath10k_wmi_main_pull_svc_rdy_ev(skb, &arg);
		wmi_main_svc_map(arg.service_map, svc_bmap);
	}

	if (ret) {
		ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
		return;
	}

	ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
	ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
	ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
	ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
	/* sw_ver0 packs major in the top byte, minor in the rest;
	 * sw_ver1 packs release in the top half, build in the bottom
	 */
	ar->fw_version_major =
		(__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
	ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
	ar->fw_version_release =
		(__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
	ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
	ar->phy_capability = __le32_to_cpu(arg.phy_capab);
	ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
	ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd);

	ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
	ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
			arg.service_map, sizeof(arg.service_map));

	/* only manually set fw features when not using FW IE format */
	if (ar->fw_api == 1 && ar->fw_version_build > 636)
		set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);

	if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
		ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
			    ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
		ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
	}

	ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
	ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1;

	/* keep an fw_version string set earlier (e.g. by FW IEs) */
	if (strlen(ar->hw->wiphy->fw_version) == 0) {
		snprintf(ar->hw->wiphy->fw_version,
			 sizeof(ar->hw->wiphy->fw_version),
			 "%u.%u.%u.%u",
			 ar->fw_version_major,
			 ar->fw_version_minor,
			 ar->fw_version_release,
			 ar->fw_version_build);
	}

	num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
	if (num_mem_reqs > WMI_MAX_MEM_REQS) {
		ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
			    num_mem_reqs);
		return;
	}

	for (i = 0; i < num_mem_reqs; ++i) {
		req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
		num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
		unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
		num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);

		/* NOTE(review): the overrides below use TARGET_10X_*
		 * constants regardless of firmware branch - confirm this
		 * is intended for main firmware too
		 */
		if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
			/* number of units to allocate is number of
			 * peers, 1 extra for self peer on target */
			/* this needs to be tied, host and target
			 * can get out of sync */
			num_units = TARGET_10X_NUM_PEERS + 1;
		else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
			num_units = TARGET_10X_NUM_VDEVS + 1;

		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
			   req_id,
			   __le32_to_cpu(arg.mem_reqs[i]->num_units),
			   num_unit_info,
			   unit_size,
			   num_units);

		ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
						unit_size);
		if (ret)
			return;
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
		   __le32_to_cpu(arg.min_tx_power),
		   __le32_to_cpu(arg.max_tx_power),
		   __le32_to_cpu(arg.ht_cap),
		   __le32_to_cpu(arg.vht_cap),
		   __le32_to_cpu(arg.sw_ver0),
		   __le32_to_cpu(arg.sw_ver1),
		   __le32_to_cpu(arg.phy_capab),
		   __le32_to_cpu(arg.num_rf_chains),
		   __le32_to_cpu(arg.eeprom_rd),
		   __le32_to_cpu(arg.num_mem_reqs));

	complete(&ar->wmi.service_ready);
}
/* WMI_READY_EVENTID handler: record the firmware-provided MAC address and
 * complete the unified_ready completion the boot path waits on.
 *
 * Returns -EINVAL (after WARN_ON) on a truncated event, 0 otherwise.
 */
static int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;

	if (WARN_ON(skb->len < sizeof(*ev)))
		return -EINVAL;

	ether_addr_copy(ar->mac_addr, ev->mac_addr.addr);
	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n",
		   __le32_to_cpu(ev->sw_version),
		   __le32_to_cpu(ev->abi_version),
		   ev->mac_addr.addr,
		   __le32_to_cpu(ev->status), skb->len, sizeof(*ev));
	complete(&ar->wmi.unified_ready);
	return 0;
}
/* Receive path for main (non-10.x) firmware: strip the WMI command header
 * and dispatch on event id. The skb is freed here for every event except
 * WMI_MGMT_RX_EVENTID, whose handler takes ownership.
 */
static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	/* drop runt frames that cannot hold the command header */
	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	switch (id) {
	case WMI_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_PDEV_FTM_INTG_EVENTID:
		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath10k_wmi_event_gtk_offload_status(ar, skb);
		break;
	case WMI_GTK_REKEY_FAIL_EVENTID:
		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
		break;
	case WMI_TX_DELBA_COMPLETE_EVENTID:
		ath10k_wmi_event_delba_complete(ar, skb);
		break;
	case WMI_TX_ADDBA_COMPLETE_EVENTID:
		ath10k_wmi_event_addba_complete(ar, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
		break;
	case WMI_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		break;
	case WMI_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	default:
		ath10k_warn(ar, "Unknown eventid: %d\n", id);
		break;
	}

	dev_kfree_skb(skb);
}
/* Receive path for 10.x firmware: strip the WMI command header, give
 * testmode (UTF) a chance to consume the event, then dispatch on event id.
 * The skb is freed here for every event except WMI_10X_MGMT_RX_EVENTID,
 * whose handler takes ownership.
 */
static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_10x_event_id id;
	bool consumed;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	/* drop runt frames that cannot hold the command header */
	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	consumed = ath10k_tm_event_wmi(ar, id, skb);

	/* Ready event must be handled normally also in UTF mode so that we
	 * know the UTF firmware has booted, others we are just bypass WMI
	 * events to testmode.
	 */
	if (consumed && id != WMI_10X_READY_EVENTID) {
		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi testmode consumed 0x%x\n", id);
		goto out;
	}

	switch (id) {
	case WMI_10X_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_10X_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_10X_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_10X_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_10X_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_10X_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_10X_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_10X_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_10X_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_10X_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_10X_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_10X_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_10X_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_10X_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_10X_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_10X_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_10X_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_10X_INST_RSSI_STATS_EVENTID:
		ath10k_wmi_event_inst_rssi_stats(ar, skb);
		break;
	case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
		ath10k_wmi_event_vdev_standby_req(ar, skb);
		break;
	case WMI_10X_VDEV_RESUME_REQ_EVENTID:
		ath10k_wmi_event_vdev_resume_req(ar, skb);
		break;
	case WMI_10X_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		break;
	case WMI_10X_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	case WMI_10X_PDEV_UTF_EVENTID:
		/* ignore utf events */
		break;
	default:
		ath10k_warn(ar, "Unknown eventid: %d\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
/* Receive path for 10.2 firmware: strip the WMI command header and
 * dispatch on event id. Several 10.2-only events are recognized but not
 * yet implemented. The skb is freed here for every event except
 * WMI_10_2_MGMT_RX_EVENTID, whose handler takes ownership.
 */
static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_10_2_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	/* drop runt frames that cannot hold the command header */
	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	switch (id) {
	case WMI_10_2_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_10_2_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_10_2_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_10_2_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_10_2_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_10_2_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_10_2_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_10_2_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_10_2_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_10_2_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_10_2_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_10_2_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_10_2_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_10_2_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_10_2_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_10_2_INST_RSSI_STATS_EVENTID:
		ath10k_wmi_event_inst_rssi_stats(ar, skb);
		break;
	case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
		ath10k_wmi_event_vdev_standby_req(ar, skb);
		break;
	case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
		ath10k_wmi_event_vdev_resume_req(ar, skb);
		break;
	case WMI_10_2_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		break;
	case WMI_10_2_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	case WMI_10_2_RTT_KEEPALIVE_EVENTID:
	case WMI_10_2_GPIO_INPUT_EVENTID:
	case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
	case WMI_10_2_GENERIC_BUFFER_EVENTID:
	case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
	case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
	case WMI_10_2_WDS_PEER_EVENTID:
		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "received event id %d not implemented\n", id);
		break;
	default:
		ath10k_warn(ar, "Unknown eventid: %d\n", id);
		break;
	}

	dev_kfree_skb(skb);
}
  2577. static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
  2578. {
  2579. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  2580. if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
  2581. ath10k_wmi_10_2_process_rx(ar, skb);
  2582. else
  2583. ath10k_wmi_10x_process_rx(ar, skb);
  2584. } else {
  2585. ath10k_wmi_main_process_rx(ar, skb);
  2586. }
  2587. }
  2588. int ath10k_wmi_connect(struct ath10k *ar)
  2589. {
  2590. int status;
  2591. struct ath10k_htc_svc_conn_req conn_req;
  2592. struct ath10k_htc_svc_conn_resp conn_resp;
  2593. memset(&conn_req, 0, sizeof(conn_req));
  2594. memset(&conn_resp, 0, sizeof(conn_resp));
  2595. /* these fields are the same for all service endpoints */
  2596. conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
  2597. conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
  2598. conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
  2599. /* connect to control service */
  2600. conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
  2601. status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
  2602. if (status) {
  2603. ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
  2604. status);
  2605. return status;
  2606. }
  2607. ar->wmi.eid = conn_resp.eid;
  2608. return 0;
  2609. }
/* Send the main-firmware WMI_PDEV_SET_REGDOMAIN command.
 *
 * @rd:    combined regulatory domain code
 * @rd2g:  2 GHz regulatory domain code
 * @rd5g:  5 GHz regulatory domain code
 * @ctl2g: 2 GHz conformance test limit
 * @ctl5g: 5 GHz conformance test limit
 *
 * Returns 0 on success or a negative errno.
 */
static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
					      u16 rd2g, u16 rd5g, u16 ctl2g,
					      u16 ctl5g)
{
	struct wmi_pdev_set_regdomain_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	/* all command fields are little-endian on the wire */
	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
	cmd->reg_domain = __cpu_to_le32(rd);
	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
		   rd, rd2g, rd5g, ctl2g, ctl5g);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
/* Send the 10.x-firmware WMI_PDEV_SET_REGDOMAIN command.
 *
 * Same as the main-firmware variant but the 10.x command format
 * additionally carries the DFS region (@dfs_reg).
 *
 * Returns 0 on success or a negative errno.
 */
static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
					     u16 rd2g, u16 rd5g,
					     u16 ctl2g, u16 ctl5g,
					     enum wmi_dfs_region dfs_reg)
{
	struct wmi_pdev_set_regdomain_cmd_10x *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	/* all command fields are little-endian on the wire */
	cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
	cmd->reg_domain = __cpu_to_le32(rd);
	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
	cmd->dfs_domain = __cpu_to_le32(dfs_reg);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
		   rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
  2654. int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
  2655. u16 rd5g, u16 ctl2g, u16 ctl5g,
  2656. enum wmi_dfs_region dfs_reg)
  2657. {
  2658. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
  2659. return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
  2660. ctl2g, ctl5g, dfs_reg);
  2661. else
  2662. return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
  2663. ctl2g, ctl5g);
  2664. }
/* Ask the target to suspend.
 *
 * @suspend_opt: suspend option forwarded verbatim to the firmware.
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct wmi_pdev_suspend_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
	cmd->suspend_opt = __cpu_to_le32(suspend_opt);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}
  2676. int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
  2677. {
  2678. struct sk_buff *skb;
  2679. skb = ath10k_wmi_alloc_skb(ar, 0);
  2680. if (skb == NULL)
  2681. return -ENOMEM;
  2682. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
  2683. }
/* Set a pdev (physical-device) firmware parameter.
 *
 * @id:    pdev parameter id; WMI_PDEV_PARAM_UNSUPPORTED marks a
 *         parameter the running firmware does not implement and is
 *         rejected with -EOPNOTSUPP before any allocation.
 * @value: raw parameter value forwarded to the firmware.
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct wmi_pdev_set_param_cmd *cmd;
	struct sk_buff *skb;

	if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
		ath10k_warn(ar, "pdev param %d not supported by firmware\n",
			    id);
		return -EOPNOTSUPP;
	}

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
	cmd->param_id = __cpu_to_le32(id);
	cmd->param_value = __cpu_to_le32(value);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
		   id, value);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}
/* Serialize the host memory chunk list into a WMI init command.
 *
 * Copies physical address, length and request id of each chunk in
 * ar->wmi.mem_chunks into @chunks in little-endian wire format.
 * The caller must have sized the command buffer for
 * ar->wmi.num_mem_chunks entries.
 */
static void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
					   struct wmi_host_mem_chunks *chunks)
{
	struct host_memory_chunk *chunk;
	int i;

	chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);

	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
		chunk = &chunks->items[i];
		chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
		chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
		chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);

		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi chunk %d len %d requested, addr 0x%llx\n",
			   i,
			   ar->wmi.mem_chunks[i].len,
			   (unsigned long long)ar->wmi.mem_chunks[i].paddr);
	}
}
/* Build and send WMI_INIT_CMDID for main (non-10.x) firmware.
 *
 * Fills a wmi_resource_config with the TARGET_* resource limits,
 * appends the host memory chunk list and sends the init command.
 * Returns 0 on success or a negative errno.
 */
static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
{
	struct wmi_init_cmd *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config config = {};
	u32 len, val;

	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
	/* peer pool is sized for data peers plus one self-peer per vdev */
	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
	config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);

	config.num_offload_reorder_bufs =
		__cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);

	config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
	/* voice/video/best-effort use the low-priority rx timeout,
	 * background the high-priority one
	 */
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);

	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);

	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);

	config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);

	config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);

	val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);

	config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);

	config.gtk_offload_max_vdev =
		__cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);

	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);

	/* command is followed by a variable-length mem chunk array */
	len = sizeof(*cmd) +
	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);

	buf = ath10k_wmi_alloc_skb(ar, len);
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd *)buf->data;

	memcpy(&cmd->resource_config, &config, sizeof(config));
	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
	return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
/* Build and send WMI_INIT_CMDID for 10.x firmware.
 *
 * Same flow as ath10k_wmi_main_cmd_init() but uses the 10.x command
 * layout and the TARGET_10X_* resource limits.  Returns 0 on success
 * or a negative errno.
 */
static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
{
	struct wmi_init_cmd_10x *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config_10x config = {};
	u32 len, val;

	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
	/* voice/video/best-effort use the low-priority rx timeout,
	 * background the high-priority one
	 */
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);

	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);

	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);

	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);

	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);

	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);

	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);

	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);

	/* command is followed by a variable-length mem chunk array */
	len = sizeof(*cmd) +
	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);

	buf = ath10k_wmi_alloc_skb(ar, len);
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd_10x *)buf->data;

	memcpy(&cmd->resource_config, &config, sizeof(config));
	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
	return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
/* Build and send WMI_INIT_CMDID for 10.2 firmware.
 *
 * Reuses the 10.x resource config (TARGET_10X_* limits) but wraps it
 * in the 10.2 command layout; the shared part is copied into
 * cmd->resource_config.common.  Returns 0 on success or a negative
 * errno.
 */
static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
{
	struct wmi_init_cmd_10_2 *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config_10x config = {};
	u32 len, val;

	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
	/* voice/video/best-effort use the low-priority rx timeout,
	 * background the high-priority one
	 */
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);

	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);

	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);

	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);

	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);

	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);

	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);

	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);

	/* command is followed by a variable-length mem chunk array */
	len = sizeof(*cmd) +
	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);

	buf = ath10k_wmi_alloc_skb(ar, len);
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd_10_2 *)buf->data;

	/* only the 10.x-common portion of the 10.2 config is filled here */
	memcpy(&cmd->resource_config.common, &config, sizeof(config));
	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
	return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
  2876. int ath10k_wmi_cmd_init(struct ath10k *ar)
  2877. {
  2878. int ret;
  2879. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  2880. if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
  2881. ret = ath10k_wmi_10_2_cmd_init(ar);
  2882. else
  2883. ret = ath10k_wmi_10x_cmd_init(ar);
  2884. } else {
  2885. ret = ath10k_wmi_main_cmd_init(ar);
  2886. }
  2887. return ret;
  2888. }
  2889. static int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
  2890. {
  2891. if (arg->ie_len && !arg->ie)
  2892. return -EINVAL;
  2893. if (arg->n_channels && !arg->channels)
  2894. return -EINVAL;
  2895. if (arg->n_ssids && !arg->ssids)
  2896. return -EINVAL;
  2897. if (arg->n_bssids && !arg->bssids)
  2898. return -EINVAL;
  2899. if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
  2900. return -EINVAL;
  2901. if (arg->n_channels > ARRAY_SIZE(arg->channels))
  2902. return -EINVAL;
  2903. if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
  2904. return -EINVAL;
  2905. if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
  2906. return -EINVAL;
  2907. return 0;
  2908. }
  2909. static size_t
  2910. ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
  2911. {
  2912. int len = 0;
  2913. if (arg->ie_len) {
  2914. len += sizeof(struct wmi_ie_data);
  2915. len += roundup(arg->ie_len, 4);
  2916. }
  2917. if (arg->n_channels) {
  2918. len += sizeof(struct wmi_chan_list);
  2919. len += sizeof(__le32) * arg->n_channels;
  2920. }
  2921. if (arg->n_ssids) {
  2922. len += sizeof(struct wmi_ssid_list);
  2923. len += sizeof(struct wmi_ssid) * arg->n_ssids;
  2924. }
  2925. if (arg->n_bssids) {
  2926. len += sizeof(struct wmi_bssid_list);
  2927. len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
  2928. }
  2929. return len;
  2930. }
/* Fill the fixed (common) part of a start-scan command from @arg.
 *
 * The scan id and requestor id are tagged with the host prefixes so
 * the firmware can distinguish host-originated scans.  All fields are
 * converted to little-endian wire format.
 */
static void
ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
				 const struct wmi_start_scan_arg *arg)
{
	u32 scan_id;
	u32 scan_req_id;

	scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
	scan_id |= arg->scan_id;

	scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
	scan_req_id |= arg->scan_req_id;

	cmn->scan_id = __cpu_to_le32(scan_id);
	cmn->scan_req_id = __cpu_to_le32(scan_req_id);
	cmn->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmn->scan_priority = __cpu_to_le32(arg->scan_priority);
	cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
	cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
	cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
	cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time);
	cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time);
	cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
	cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
	cmn->idle_time = __cpu_to_le32(arg->idle_time);
	cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time);
	cmn->probe_delay = __cpu_to_le32(arg->probe_delay);
	cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
}
/* Append the optional start-scan TLVs after the fixed command part.
 *
 * TLVs are written in the order: channel list, SSID list, BSSID list,
 * IE data; @ptr is advanced past each emitted TLV (header + payload).
 * The IE payload is padded to a 4-byte boundary.  The caller must have
 * sized the buffer with ath10k_wmi_start_scan_tlvs_len(), which uses
 * the same layout.
 */
static void
ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
			       const struct wmi_start_scan_arg *arg)
{
	struct wmi_ie_data *ie;
	struct wmi_chan_list *channels;
	struct wmi_ssid_list *ssids;
	struct wmi_bssid_list *bssids;
	void *ptr = tlvs->tlvs;
	int i;

	if (arg->n_channels) {
		channels = ptr;
		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
		channels->num_chan = __cpu_to_le32(arg->n_channels);

		/* channel entries carry only the 16-bit frequency */
		for (i = 0; i < arg->n_channels; i++)
			channels->channel_list[i].freq =
				__cpu_to_le16(arg->channels[i]);

		ptr += sizeof(*channels);
		ptr += sizeof(__le32) * arg->n_channels;
	}

	if (arg->n_ssids) {
		ssids = ptr;
		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);

		for (i = 0; i < arg->n_ssids; i++) {
			ssids->ssids[i].ssid_len =
				__cpu_to_le32(arg->ssids[i].len);
			memcpy(&ssids->ssids[i].ssid,
			       arg->ssids[i].ssid,
			       arg->ssids[i].len);
		}

		ptr += sizeof(*ssids);
		ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
	}

	if (arg->n_bssids) {
		bssids = ptr;
		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);

		for (i = 0; i < arg->n_bssids; i++)
			memcpy(&bssids->bssid_list[i],
			       arg->bssids[i].bssid,
			       ETH_ALEN);

		ptr += sizeof(*bssids);
		ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
	}

	if (arg->ie_len) {
		ie = ptr;
		ie->tag = __cpu_to_le32(WMI_IE_TAG);
		ie->ie_len = __cpu_to_le32(arg->ie_len);
		memcpy(ie->ie_data, arg->ie, arg->ie_len);

		ptr += sizeof(*ie);
		ptr += roundup(arg->ie_len, 4);
	}
}
/* Validate, serialize and send a WMI start-scan command.
 *
 * The command layout differs between 10.x and main firmware, so both
 * the buffer length and the cast type are chosen per firmware feature
 * flag; the common fields and TLVs are filled by the shared helpers.
 * Returns 0 on success or a negative errno.
 */
int ath10k_wmi_start_scan(struct ath10k *ar,
			  const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;
	size_t len;
	int ret;

	ret = ath10k_wmi_start_scan_verify(arg);
	if (ret)
		return ret;

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
		len = sizeof(struct wmi_10x_start_scan_cmd) +
		      ath10k_wmi_start_scan_tlvs_len(arg);
	else
		len = sizeof(struct wmi_start_scan_cmd) +
		      ath10k_wmi_start_scan_tlvs_len(arg);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
		struct wmi_10x_start_scan_cmd *cmd;

		cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
		ath10k_wmi_put_start_scan_common(&cmd->common, arg);
		ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
	} else {
		struct wmi_start_scan_cmd *cmd;

		cmd = (struct wmi_start_scan_cmd *)skb->data;
		/* main firmware has an extra burst duration field; not
		 * exposed through wmi_start_scan_arg, always zero here
		 */
		cmd->burst_duration_ms = __cpu_to_le32(0);
		ath10k_wmi_put_start_scan_common(&cmd->common, arg);
		ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}
/* Initialize @arg with the driver's default scan parameters.
 *
 * Dwell/rest/probe timings are in the units the firmware expects
 * (presumably milliseconds -- TODO confirm against firmware docs).
 * A single broadcast BSSID is installed so the scan matches any AP.
 * Callers may override individual fields afterwards; note that
 * scan_ctrl_flags is OR-ed into, not overwritten.
 */
void ath10k_wmi_start_scan_init(struct ath10k *ar,
				struct wmi_start_scan_arg *arg)
{
	/* setup commonly used values */
	arg->scan_req_id = 1;
	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
	arg->dwell_time_active = 50;
	arg->dwell_time_passive = 150;
	arg->min_rest_time = 50;
	arg->max_rest_time = 500;
	arg->repeat_probe_time = 0;
	arg->probe_spacing_time = 0;
	arg->idle_time = 0;
	arg->max_scan_time = 20000;
	arg->probe_delay = 5;
	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
		| WMI_SCAN_EVENT_COMPLETED
		| WMI_SCAN_EVENT_BSS_CHANNEL
		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
		| WMI_SCAN_EVENT_DEQUEUED;
	arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
	arg->n_bssids = 1;
	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
}
/* Send a WMI stop-scan command.
 *
 * Both the requestor id and (for WMI_SCAN_STOP_ONE) the scan id must
 * fit in 12 bits since the upper bits are reserved for the host
 * prefixes OR-ed in below.  Returns 0 on success or a negative errno.
 */
int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct wmi_stop_scan_cmd *cmd;
	struct sk_buff *skb;
	u32 scan_id;
	u32 req_id;

	if (arg->req_id > 0xFFF)
		return -EINVAL;
	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	/* tag ids with the host prefixes, mirroring start-scan */
	scan_id = arg->u.scan_id;
	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;

	req_id = arg->req_id;
	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;

	cmd = (struct wmi_stop_scan_cmd *)skb->data;
	cmd->req_type = __cpu_to_le32(arg->req_type);
	/* arg->u is a union: vdev_id and scan_id overlap; which one is
	 * meaningful depends on req_type
	 */
	cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
	cmd->scan_id = __cpu_to_le32(scan_id);
	cmd->scan_req_id = __cpu_to_le32(req_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
		   arg->req_id, arg->req_type, arg->u.scan_id);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}
/* Create a virtual device (vdev) in the firmware.
 *
 * @vdev_id: host-chosen vdev identifier
 * @type:    vdev type (AP/STA/etc. per enum wmi_vdev_type)
 * @subtype: vdev subtype
 * @macaddr: MAC address to assign to the vdev
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
			   enum wmi_vdev_type type,
			   enum wmi_vdev_subtype subtype,
			   const u8 macaddr[ETH_ALEN])
{
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->vdev_type = __cpu_to_le32(type);
	cmd->vdev_subtype = __cpu_to_le32(subtype);
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
		   vdev_id, type, subtype, macaddr);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}
/* Delete a previously created vdev from the firmware.
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_delete_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "WMI vdev delete id %d\n", vdev_id);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}
/* Common implementation for vdev start and restart commands.
 *
 * @cmd_id must be either the start or the restart request command id;
 * anything else is rejected.  The SSID argument is validated before
 * allocation: a non-NULL SSID must have a non-zero length that fits
 * the command's SSID field, and a hidden SSID requires an SSID.
 *
 * Returns 0 on success, -EINVAL on invalid arguments or a negative
 * errno from allocation/send.
 */
static int
ath10k_wmi_vdev_start_restart(struct ath10k *ar,
			      const struct wmi_vdev_start_request_arg *arg,
			      u32 cmd_id)
{
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	const char *cmdname;
	u32 flags = 0;

	if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
	    cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
		return -EINVAL;
	if (WARN_ON(arg->ssid && arg->ssid_len == 0))
		return -EINVAL;
	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
		return -EINVAL;
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	/* pick a name for the debug message below */
	if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)
		cmdname = "start";
	else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
		cmdname = "restart";
	else
		return -EINVAL; /* should not happen, we already check cmd_id */

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	if (arg->hidden_ssid)
		flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		flags |= WMI_VDEV_START_PMF_ENABLED;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
	cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
	cmd->flags = __cpu_to_le32(flags);
	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);

	if (arg->ssid) {
		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
	}

	ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
		   cmdname, arg->vdev_id,
		   flags, arg->channel.freq, arg->channel.mode,
		   cmd->chan.flags, arg->channel.max_power);

	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
  3180. int ath10k_wmi_vdev_start(struct ath10k *ar,
  3181. const struct wmi_vdev_start_request_arg *arg)
  3182. {
  3183. u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;
  3184. return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
  3185. }
  3186. int ath10k_wmi_vdev_restart(struct ath10k *ar,
  3187. const struct wmi_vdev_start_request_arg *arg)
  3188. {
  3189. u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;
  3190. return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
  3191. }
/* Stop a running vdev.
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_stop_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}
/* Bring a vdev up (make it operational).
 *
 * @aid:   association id to report for the vdev
 * @bssid: BSSID the vdev is associated with
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct wmi_vdev_up_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_up_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->vdev_assoc_id = __cpu_to_le32(aid);
	ether_addr_copy(cmd->vdev_bssid.addr, bssid);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
		   vdev_id, aid, bssid);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}
/* Bring a vdev down (take it out of operation).
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_down_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_down_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi mgmt vdev down id 0x%x\n", vdev_id);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}
/* Set a per-vdev firmware parameter.
 *
 * @param_id: vdev parameter id; WMI_VDEV_PARAM_UNSUPPORTED marks a
 *            parameter the running firmware does not implement and is
 *            rejected with -EOPNOTSUPP before any allocation (logged
 *            at debug level only, unlike the pdev equivalent).
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
			      u32 param_id, u32 param_value)
{
	struct wmi_vdev_set_param_cmd *cmd;
	struct sk_buff *skb;

	if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "vdev param %d not supported by firmware\n",
			   param_id);
		return -EOPNOTSUPP;
	}

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(param_value);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi vdev id 0x%x set param %d value %d\n",
		   vdev_id, param_id, param_value);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}
/* Install (or clear) a crypto key on a vdev.
 *
 * Key material must be present exactly when a real cipher is used:
 * WMI_CIPHER_NONE with key data, or any other cipher without key
 * data, is rejected.  The variable-length key material is appended
 * after the fixed command.  @arg->macaddr, when set, selects the peer
 * the key applies to.
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_wmi_vdev_install_key(struct ath10k *ar,
				const struct wmi_vdev_install_key_arg *arg)
{
	struct wmi_vdev_install_key_cmd *cmd;
	struct sk_buff *skb;

	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
		return -EINVAL;
	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->key_idx = __cpu_to_le32(arg->key_idx);
	cmd->key_flags = __cpu_to_le32(arg->key_flags);
	cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
	cmd->key_len = __cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);

	if (arg->macaddr)
		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
	if (arg->key_data)
		memcpy(cmd->key_data, arg->key_data, arg->key_len);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi vdev install key idx %d cipher %d len %d\n",
		   arg->key_idx, arg->key_cipher, arg->key_len);
	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}
/* Configure spectral scan parameters for a vdev.
 *
 * Copies every field of @arg into the command in little-endian wire
 * format and sends WMI vdev_spectral_scan_configure.  Returns 0 on
 * success or a negative errno.
 */
int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
				  const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct wmi_vdev_spectral_conf_cmd *cmd;
	struct sk_buff *skb;
	u32 cmdid;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->scan_count = __cpu_to_le32(arg->scan_count);
	cmd->scan_period = __cpu_to_le32(arg->scan_period);
	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);

	cmdid = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmdid);
}
/* Enable/trigger spectral scan on a vdev.
 *
 * @trigger: trigger command value forwarded to the firmware
 * @enable:  enable command value forwarded to the firmware
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				    u32 enable)
{
	struct wmi_vdev_spectral_enable_cmd *cmd;
	struct sk_buff *skb;
	u32 cmdid;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->trigger_cmd = __cpu_to_le32(trigger);
	cmd->enable_cmd = __cpu_to_le32(enable);

	cmdid = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmdid);
}
/* Create a peer entry in the firmware for @peer_addr on @vdev_id.
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
			   const u8 peer_addr[ETH_ALEN])
{
	struct wmi_peer_create_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_create_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi peer create vdev_id %d peer_addr %pM\n",
		   vdev_id, peer_addr);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}
  3350. int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
  3351. const u8 peer_addr[ETH_ALEN])
  3352. {
  3353. struct wmi_peer_delete_cmd *cmd;
  3354. struct sk_buff *skb;
  3355. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3356. if (!skb)
  3357. return -ENOMEM;
  3358. cmd = (struct wmi_peer_delete_cmd *)skb->data;
  3359. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3360. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  3361. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3362. "wmi peer delete vdev_id %d peer_addr %pM\n",
  3363. vdev_id, peer_addr);
  3364. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
  3365. }
  3366. int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
  3367. const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
  3368. {
  3369. struct wmi_peer_flush_tids_cmd *cmd;
  3370. struct sk_buff *skb;
  3371. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3372. if (!skb)
  3373. return -ENOMEM;
  3374. cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
  3375. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3376. cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
  3377. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  3378. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3379. "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
  3380. vdev_id, peer_addr, tid_bitmap);
  3381. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
  3382. }
  3383. int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
  3384. const u8 *peer_addr, enum wmi_peer_param param_id,
  3385. u32 param_value)
  3386. {
  3387. struct wmi_peer_set_param_cmd *cmd;
  3388. struct sk_buff *skb;
  3389. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3390. if (!skb)
  3391. return -ENOMEM;
  3392. cmd = (struct wmi_peer_set_param_cmd *)skb->data;
  3393. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3394. cmd->param_id = __cpu_to_le32(param_id);
  3395. cmd->param_value = __cpu_to_le32(param_value);
  3396. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  3397. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3398. "wmi vdev %d peer 0x%pM set param %d value %d\n",
  3399. vdev_id, peer_addr, param_id, param_value);
  3400. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
  3401. }
  3402. int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
  3403. enum wmi_sta_ps_mode psmode)
  3404. {
  3405. struct wmi_sta_powersave_mode_cmd *cmd;
  3406. struct sk_buff *skb;
  3407. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3408. if (!skb)
  3409. return -ENOMEM;
  3410. cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
  3411. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3412. cmd->sta_ps_mode = __cpu_to_le32(psmode);
  3413. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3414. "wmi set powersave id 0x%x mode %d\n",
  3415. vdev_id, psmode);
  3416. return ath10k_wmi_cmd_send(ar, skb,
  3417. ar->wmi.cmd->sta_powersave_mode_cmdid);
  3418. }
  3419. int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
  3420. enum wmi_sta_powersave_param param_id,
  3421. u32 value)
  3422. {
  3423. struct wmi_sta_powersave_param_cmd *cmd;
  3424. struct sk_buff *skb;
  3425. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3426. if (!skb)
  3427. return -ENOMEM;
  3428. cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
  3429. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3430. cmd->param_id = __cpu_to_le32(param_id);
  3431. cmd->param_value = __cpu_to_le32(value);
  3432. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3433. "wmi sta ps param vdev_id 0x%x param %d value %d\n",
  3434. vdev_id, param_id, value);
  3435. return ath10k_wmi_cmd_send(ar, skb,
  3436. ar->wmi.cmd->sta_powersave_param_cmdid);
  3437. }
  3438. int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  3439. enum wmi_ap_ps_peer_param param_id, u32 value)
  3440. {
  3441. struct wmi_ap_ps_peer_cmd *cmd;
  3442. struct sk_buff *skb;
  3443. if (!mac)
  3444. return -EINVAL;
  3445. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3446. if (!skb)
  3447. return -ENOMEM;
  3448. cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
  3449. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3450. cmd->param_id = __cpu_to_le32(param_id);
  3451. cmd->param_value = __cpu_to_le32(value);
  3452. ether_addr_copy(cmd->peer_macaddr.addr, mac);
  3453. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3454. "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
  3455. vdev_id, param_id, value, mac);
  3456. return ath10k_wmi_cmd_send(ar, skb,
  3457. ar->wmi.cmd->ap_ps_peer_param_cmdid);
  3458. }
  3459. int ath10k_wmi_scan_chan_list(struct ath10k *ar,
  3460. const struct wmi_scan_chan_list_arg *arg)
  3461. {
  3462. struct wmi_scan_chan_list_cmd *cmd;
  3463. struct sk_buff *skb;
  3464. struct wmi_channel_arg *ch;
  3465. struct wmi_channel *ci;
  3466. int len;
  3467. int i;
  3468. len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
  3469. skb = ath10k_wmi_alloc_skb(ar, len);
  3470. if (!skb)
  3471. return -EINVAL;
  3472. cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
  3473. cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
  3474. for (i = 0; i < arg->n_channels; i++) {
  3475. ch = &arg->channels[i];
  3476. ci = &cmd->chan_info[i];
  3477. ath10k_wmi_put_wmi_channel(ci, ch);
  3478. }
  3479. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
  3480. }
/* Fill the firmware-branch-independent portion of a peer assoc command.
 *
 * @buf points at a command buffer whose layout begins with
 * struct wmi_common_peer_assoc_complete_cmd; the per-firmware wrappers
 * (_main/_10_1/_10_2) call this first and then append their own fields.
 */
static void
ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
			   const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_common_peer_assoc_complete_cmd *cmd = buf;

	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	/* Firmware distinguishes a fresh association (1) from reassoc (0). */
	cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
	cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
	cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
	cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
	cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
	cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
	cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
	cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
	cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
	cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
	cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
	cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);

	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);

	/* Rate arrays: num_rates was bounded against MAX_SUPPORTED_RATES
	 * by ath10k_wmi_peer_assoc() before this is called, so the memcpy
	 * lengths cannot exceed the destination arrays.
	 */
	cmd->peer_legacy_rates.num_rates =
		__cpu_to_le32(arg->peer_legacy_rates.num_rates);
	memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	cmd->peer_ht_rates.num_rates =
		__cpu_to_le32(arg->peer_ht_rates.num_rates);
	memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	cmd->peer_vht_rates.rx_max_rate =
		__cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
	cmd->peer_vht_rates.rx_mcs_set =
		__cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
	cmd->peer_vht_rates.tx_max_rate =
		__cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
	cmd->peer_vht_rates.tx_mcs_set =
		__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
}
  3517. static void
  3518. ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
  3519. const struct wmi_peer_assoc_complete_arg *arg)
  3520. {
  3521. struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
  3522. ath10k_wmi_peer_assoc_fill(ar, buf, arg);
  3523. memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
  3524. }
/* 10.1 firmware: the assoc command layout matches the common base, so
 * filling the shared fields is all that is required.
 */
static void
ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
				const struct wmi_peer_assoc_complete_arg *arg)
{
	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
}
  3531. static void
  3532. ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
  3533. const struct wmi_peer_assoc_complete_arg *arg)
  3534. {
  3535. struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
  3536. int max_mcs, max_nss;
  3537. u32 info0;
  3538. /* TODO: Is using max values okay with firmware? */
  3539. max_mcs = 0xf;
  3540. max_nss = 0xf;
  3541. info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
  3542. SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
  3543. ath10k_wmi_peer_assoc_fill(ar, buf, arg);
  3544. cmd->info0 = __cpu_to_le32(info0);
  3545. }
  3546. int ath10k_wmi_peer_assoc(struct ath10k *ar,
  3547. const struct wmi_peer_assoc_complete_arg *arg)
  3548. {
  3549. struct sk_buff *skb;
  3550. int len;
  3551. if (arg->peer_mpdu_density > 16)
  3552. return -EINVAL;
  3553. if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
  3554. return -EINVAL;
  3555. if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
  3556. return -EINVAL;
  3557. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  3558. if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
  3559. len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
  3560. else
  3561. len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
  3562. } else {
  3563. len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
  3564. }
  3565. skb = ath10k_wmi_alloc_skb(ar, len);
  3566. if (!skb)
  3567. return -ENOMEM;
  3568. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  3569. if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
  3570. ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
  3571. else
  3572. ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
  3573. } else {
  3574. ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
  3575. }
  3576. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3577. "wmi peer assoc vdev %d addr %pM (%s)\n",
  3578. arg->vdev_id, arg->addr,
  3579. arg->peer_reassoc ? "reassociate" : "new");
  3580. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
  3581. }
/* This function assumes the beacon is already DMA mapped */
int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
{
	struct wmi_bcn_tx_ref_cmd *cmd;
	struct sk_buff *skb;
	struct sk_buff *beacon = arvif->beacon;
	struct ath10k *ar = arvif->ar;
	struct ieee80211_hdr *hdr;
	int ret;
	u16 fc;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	/* Extract the frame control word so firmware sees the beacon's
	 * type/subtype without parsing the frame body itself.
	 */
	hdr = (struct ieee80211_hdr *)beacon->data;
	fc = le16_to_cpu(hdr->frame_control);

	cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
	cmd->data_len = __cpu_to_le32(beacon->len);
	/* Only the DMA address is sent, not the payload — hence the
	 * "already DMA mapped" requirement in the comment above.
	 */
	cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr);
	cmd->msdu_id = 0;
	cmd->frame_control = __cpu_to_le32(fc);
	cmd->flags = 0;
	cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);

	if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);

	if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);

	/* NOTE(review): uses the _nowait send variant — presumably this
	 * runs in a context that must not block on WMI credits; confirm
	 * against the callers.
	 */
	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);

	/* On failure the command skb is not consumed; free it here. */
	if (ret)
		dev_kfree_skb(skb);

	return ret;
}
  3615. static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
  3616. const struct wmi_wmm_params_arg *arg)
  3617. {
  3618. params->cwmin = __cpu_to_le32(arg->cwmin);
  3619. params->cwmax = __cpu_to_le32(arg->cwmax);
  3620. params->aifs = __cpu_to_le32(arg->aifs);
  3621. params->txop = __cpu_to_le32(arg->txop);
  3622. params->acm = __cpu_to_le32(arg->acm);
  3623. params->no_ack = __cpu_to_le32(arg->no_ack);
  3624. }
  3625. int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
  3626. const struct wmi_pdev_set_wmm_params_arg *arg)
  3627. {
  3628. struct wmi_pdev_set_wmm_params *cmd;
  3629. struct sk_buff *skb;
  3630. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3631. if (!skb)
  3632. return -ENOMEM;
  3633. cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
  3634. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
  3635. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
  3636. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
  3637. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
  3638. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
  3639. return ath10k_wmi_cmd_send(ar, skb,
  3640. ar->wmi.cmd->pdev_set_wmm_params_cmdid);
  3641. }
  3642. int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
  3643. {
  3644. struct wmi_request_stats_cmd *cmd;
  3645. struct sk_buff *skb;
  3646. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3647. if (!skb)
  3648. return -ENOMEM;
  3649. cmd = (struct wmi_request_stats_cmd *)skb->data;
  3650. cmd->stats_id = __cpu_to_le32(stats_id);
  3651. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
  3652. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
  3653. }
  3654. int ath10k_wmi_force_fw_hang(struct ath10k *ar,
  3655. enum wmi_force_fw_hang_type type, u32 delay_ms)
  3656. {
  3657. struct wmi_force_fw_hang_cmd *cmd;
  3658. struct sk_buff *skb;
  3659. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3660. if (!skb)
  3661. return -ENOMEM;
  3662. cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
  3663. cmd->type = __cpu_to_le32(type);
  3664. cmd->delay_ms = __cpu_to_le32(delay_ms);
  3665. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
  3666. type, delay_ms);
  3667. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
  3668. }
  3669. int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
  3670. {
  3671. struct wmi_dbglog_cfg_cmd *cmd;
  3672. struct sk_buff *skb;
  3673. u32 cfg;
  3674. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3675. if (!skb)
  3676. return -ENOMEM;
  3677. cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
  3678. if (module_enable) {
  3679. cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE,
  3680. ATH10K_DBGLOG_CFG_LOG_LVL);
  3681. } else {
  3682. /* set back defaults, all modules with WARN level */
  3683. cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
  3684. ATH10K_DBGLOG_CFG_LOG_LVL);
  3685. module_enable = ~0;
  3686. }
  3687. cmd->module_enable = __cpu_to_le32(module_enable);
  3688. cmd->module_valid = __cpu_to_le32(~0);
  3689. cmd->config_enable = __cpu_to_le32(cfg);
  3690. cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
  3691. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3692. "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
  3693. __le32_to_cpu(cmd->module_enable),
  3694. __le32_to_cpu(cmd->module_valid),
  3695. __le32_to_cpu(cmd->config_enable),
  3696. __le32_to_cpu(cmd->config_valid));
  3697. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
  3698. }
  3699. int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
  3700. {
  3701. struct wmi_pdev_pktlog_enable_cmd *cmd;
  3702. struct sk_buff *skb;
  3703. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3704. if (!skb)
  3705. return -ENOMEM;
  3706. ev_bitmap &= ATH10K_PKTLOG_ANY;
  3707. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3708. "wmi enable pktlog filter:%x\n", ev_bitmap);
  3709. cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
  3710. cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
  3711. return ath10k_wmi_cmd_send(ar, skb,
  3712. ar->wmi.cmd->pdev_pktlog_enable_cmdid);
  3713. }
  3714. int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
  3715. {
  3716. struct sk_buff *skb;
  3717. skb = ath10k_wmi_alloc_skb(ar, 0);
  3718. if (!skb)
  3719. return -ENOMEM;
  3720. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
  3721. return ath10k_wmi_cmd_send(ar, skb,
  3722. ar->wmi.cmd->pdev_pktlog_disable_cmdid);
  3723. }
  3724. int ath10k_wmi_attach(struct ath10k *ar)
  3725. {
  3726. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  3727. if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
  3728. ar->wmi.cmd = &wmi_10_2_cmd_map;
  3729. else
  3730. ar->wmi.cmd = &wmi_10x_cmd_map;
  3731. ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
  3732. ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
  3733. } else {
  3734. ar->wmi.cmd = &wmi_cmd_map;
  3735. ar->wmi.vdev_param = &wmi_vdev_param_map;
  3736. ar->wmi.pdev_param = &wmi_pdev_param_map;
  3737. }
  3738. init_completion(&ar->wmi.service_ready);
  3739. init_completion(&ar->wmi.unified_ready);
  3740. init_waitqueue_head(&ar->wmi.tx_credits_wq);
  3741. return 0;
  3742. }
  3743. void ath10k_wmi_detach(struct ath10k *ar)
  3744. {
  3745. int i;
  3746. /* free the host memory chunks requested by firmware */
  3747. for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
  3748. dma_free_coherent(ar->dev,
  3749. ar->wmi.mem_chunks[i].len,
  3750. ar->wmi.mem_chunks[i].vaddr,
  3751. ar->wmi.mem_chunks[i].paddr);
  3752. }
  3753. ar->wmi.num_mem_chunks = 0;
  3754. }