/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/ctype.h>

#include "core.h"
#include "htc.h"
#include "debug.h"
#include "wmi.h"
#include "wmi-tlv.h"
#include "mac.h"
#include "testmode.h"
#include "wmi-ops.h"
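
/*
 * ath10k supports several firmware branches (main, 10.x, 10.2.4) that
 * number their WMI commands differently.  Each wmi_cmd_map below
 * translates the driver's abstract command identifiers into the numeric
 * command IDs of one firmware branch; entries set to WMI_CMD_UNSUPPORTED
 * mark commands that branch does not implement, so the command send path
 * is expected to check for that value and fail the request rather than
 * pass a bogus ID to the firmware.
 *
 * Illustrative sketch only (the field access pattern follows how the
 * driver stores the selected map in ar->wmi.cmd; the surrounding
 * function is hypothetical):
 *
 *      u32 cmd_id = ar->wmi.cmd->pdev_get_temperature_cmdid;
 *
 *      if (cmd_id == WMI_CMD_UNSUPPORTED)
 *              return -EOPNOTSUPP;
 */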
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
        .init_cmdid = WMI_INIT_CMDID,
        .start_scan_cmdid = WMI_START_SCAN_CMDID,
        .stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
        .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
        .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
        .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
        .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
        .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
        .pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
        .pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
        .pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
        .pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
        .pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
        .pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
        .pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
        .pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
        .pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
        .pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
        .vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
        .vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
        .vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
        .vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
        .vdev_up_cmdid = WMI_VDEV_UP_CMDID,
        .vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
        .vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
        .vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
        .vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
        .peer_create_cmdid = WMI_PEER_CREATE_CMDID,
        .peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
        .peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
        .peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
        .peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
        .peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
        .peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
        .peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
        .bcn_tx_cmdid = WMI_BCN_TX_CMDID,
        .pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
        .bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
        .bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
        .prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
        .mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
        .prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
        .addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
        .addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
        .addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
        .delba_send_cmdid = WMI_DELBA_SEND_CMDID,
        .addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
        .send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
        .sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
        .sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
        .sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
        .pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
        .pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
        .roam_scan_mode = WMI_ROAM_SCAN_MODE,
        .roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
        .roam_scan_period = WMI_ROAM_SCAN_PERIOD,
        .roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
        .roam_ap_profile = WMI_ROAM_AP_PROFILE,
        .ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
        .ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
        .ofl_scan_period = WMI_OFL_SCAN_PERIOD,
        .p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
        .p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
        .p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
        .p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
        .p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
        .ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
        .ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
        .peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
        .wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
        .wlan_profile_set_hist_intvl_cmdid =
                        WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
        .wlan_profile_get_profile_data_cmdid =
                        WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
        .wlan_profile_enable_profile_id_cmdid =
                        WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
        .wlan_profile_list_profile_id_cmdid =
                        WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
        .pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
        .pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
        .add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
        .rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
        .wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
        .wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
        .wow_enable_disable_wake_event_cmdid =
                        WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
        .wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
        .wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
        .rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
        .rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
        .vdev_spectral_scan_configure_cmdid =
                        WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
        .vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
        .request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
        .set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
        .network_list_offload_config_cmdid =
                        WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
        .gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
        .csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
        .csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
        .chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
        .peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
        .peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
        .sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
        .sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
        .sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
        .echo_cmdid = WMI_ECHO_CMDID,
        .pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
        .dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
        .pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
        .pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
        .vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
        .vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
        .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
        .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
        .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
        .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
};
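
/*
 * The 10.x firmware branch renumbers the command space and drops a number
 * of main-branch commands (beacon/probe templates, the various offloads,
 * keepalive, force-fw-hang and others); those are mapped to
 * WMI_CMD_UNSUPPORTED in the table below.
 */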
/* 10.X WMI cmd track */
static struct wmi_cmd_map wmi_10x_cmd_map = {
        .init_cmdid = WMI_10X_INIT_CMDID,
        .start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
        .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
        .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
        .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
        .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
        .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
        .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
        .pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
        .pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
        .pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
        .pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
        .pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
        .pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
        .pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
        .pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
        .pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
        .pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
        .vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
        .vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
        .vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
        .vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
        .vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
        .vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
        .vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
        .vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
        .vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
        .peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
        .peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
        .peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
        .peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
        .peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
        .peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
        .peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
        .peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
        .bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
        .pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
        .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
        .bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
        .prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
        .mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
        .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
        .addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
        .addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
        .addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
        .delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
        .addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
        .send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
        .sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
        .sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
        .sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
        .pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
        .pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
        .roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
        .roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
        .roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
        .roam_scan_rssi_change_threshold =
                        WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
        .roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
        .ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
        .ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
        .ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
        .p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
        .p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
        .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
        .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
        .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
        .ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
        .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
        .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
        .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
        .wlan_profile_set_hist_intvl_cmdid =
                        WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
        .wlan_profile_get_profile_data_cmdid =
                        WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
        .wlan_profile_enable_profile_id_cmdid =
                        WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
        .wlan_profile_list_profile_id_cmdid =
                        WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
        .pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
        .pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
        .add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
        .rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
        .wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
        .wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
        .wow_enable_disable_wake_event_cmdid =
                        WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
        .wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
        .wow_hostwakeup_from_sleep_cmdid =
                        WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
        .rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
        .rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
        .vdev_spectral_scan_configure_cmdid =
                        WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
        .vdev_spectral_scan_enable_cmdid =
                        WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
        .request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
        .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
        .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
        .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
        .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
        .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
        .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
        .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
        .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
        .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
        .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
        .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
        .echo_cmdid = WMI_10X_ECHO_CMDID,
        .pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
        .dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
        .pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
        .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
        .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
        .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
        .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
        .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
        .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
        .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
};
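
/*
 * 10.2.4 firmware uses its own WMI_10_2_* command IDs.  The command set
 * largely mirrors 10.x, but note the additions at the end of the map,
 * e.g. a supported pdev_get_temperature command.
 */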
/* 10.2.4 WMI cmd track */
static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
        .init_cmdid = WMI_10_2_INIT_CMDID,
        .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
        .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
        .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
        .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
        .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
        .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
        .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
        .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
        .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
        .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
        .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
        .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
        .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
        .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
        .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
        .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
        .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
        .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
        .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
        .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
        .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
        .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
        .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
        .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
        .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
        .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
        .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
        .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
        .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
        .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
        .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
        .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
        .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
        .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
        .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
        .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
        .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
        .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
        .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
        .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
        .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
        .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
        .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
        .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
        .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
        .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
        .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
        .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
        .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
        .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
        .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
        .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
        .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
        .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
        .roam_scan_rssi_change_threshold =
                        WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
        .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
        .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
        .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
        .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
        .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
        .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
        .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
        .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
        .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
        .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
        .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
        .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
        .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
        .wlan_profile_set_hist_intvl_cmdid =
                        WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
        .wlan_profile_get_profile_data_cmdid =
                        WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
        .wlan_profile_enable_profile_id_cmdid =
                        WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
        .wlan_profile_list_profile_id_cmdid =
                        WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
        .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
        .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
        .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
        .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
        .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
        .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
        .wow_enable_disable_wake_event_cmdid =
                        WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
        .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
        .wow_hostwakeup_from_sleep_cmdid =
                        WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
        .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
        .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
        .vdev_spectral_scan_configure_cmdid =
                        WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
        .vdev_spectral_scan_enable_cmdid =
                        WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
        .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
        .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
        .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
        .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
        .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
        .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
        .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
        .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
        .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
        .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
        .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
        .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
        .echo_cmdid = WMI_10_2_ECHO_CMDID,
        .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
        .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
        .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
        .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
        .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
        .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
        .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
        .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
        .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
        .pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
};
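
/*
 * The vdev parameter maps below serve the same purpose as the command
 * maps above, but for per-virtual-interface (vdev) parameters issued via
 * the vdev-set-param command: each table translates the driver's abstract
 * parameter names into one firmware branch's parameter IDs, with
 * WMI_VDEV_PARAM_UNSUPPORTED marking parameters the branch lacks.
 */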
/* MAIN WMI VDEV param map */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
        .rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
        .fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
        .beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
        .listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
        .multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
        .mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
        .slot_time = WMI_VDEV_PARAM_SLOT_TIME,
        .preamble = WMI_VDEV_PARAM_PREAMBLE,
        .swba_time = WMI_VDEV_PARAM_SWBA_TIME,
        .wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
        .wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
        .wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
        .dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
        .wmi_vdev_oc_scheduler_air_time_limit =
                        WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
        .wds = WMI_VDEV_PARAM_WDS,
        .atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
        .bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
        .bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
        .bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
        .feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
        .chwidth = WMI_VDEV_PARAM_CHWIDTH,
        .chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
        .disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
        .sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
        .mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
        .protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
        .fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
        .sgi = WMI_VDEV_PARAM_SGI,
        .ldpc = WMI_VDEV_PARAM_LDPC,
        .tx_stbc = WMI_VDEV_PARAM_TX_STBC,
        .rx_stbc = WMI_VDEV_PARAM_RX_STBC,
        .intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
        .def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
        .nss = WMI_VDEV_PARAM_NSS,
        .bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
        .mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
        .mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
        .dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
        .unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
        .ap_keepalive_min_idle_inactive_time_secs =
                        WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
        .ap_keepalive_max_idle_inactive_time_secs =
                        WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
        .ap_keepalive_max_unresponsive_time_secs =
                        WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
        .ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
        .mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
        .enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
        .txbf = WMI_VDEV_PARAM_TXBF,
        .packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
        .drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
        .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
        .ap_detect_out_of_sync_sleeping_sta_time_secs =
                        WMI_VDEV_PARAM_UNSUPPORTED,
};
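
/*
 * 10.x firmware lacks a few main-branch vdev parameters (beacon-miss
 * first/final counts, TXBF, packet power save, drop-unencrypted, TX encap
 * type), which are mapped to WMI_VDEV_PARAM_UNSUPPORTED, while adding
 * 10.x-only knobs such as mcast2ucast and out-of-sync sleeping-STA
 * detection.
 */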
/* 10.X WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
        .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
        .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
        .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
        .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
        .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
        .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
        .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
        .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
        .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
        .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
        .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
        .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
        .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
        .wmi_vdev_oc_scheduler_air_time_limit =
                        WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
        .wds = WMI_10X_VDEV_PARAM_WDS,
        .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
        .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
        .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
        .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
        .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
        .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
        .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
        .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
        .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
        .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
        .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
        .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
        .sgi = WMI_10X_VDEV_PARAM_SGI,
        .ldpc = WMI_10X_VDEV_PARAM_LDPC,
        .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
        .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
        .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
        .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
        .nss = WMI_10X_VDEV_PARAM_NSS,
        .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
        .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
        .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
        .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
        .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
        .ap_keepalive_min_idle_inactive_time_secs =
                WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
        .ap_keepalive_max_idle_inactive_time_secs =
                WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
        .ap_keepalive_max_unresponsive_time_secs =
                WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
        .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
        .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
        .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
        .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
        .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
        .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
        .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
        .ap_detect_out_of_sync_sleeping_sta_time_secs =
                WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
};
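
/* 10.2.4 WMI VDEV param map (currently identical to the 10.x map) */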
static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
        .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
        .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
        .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
        .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
        .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
        .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
        .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
        .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
        .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
        .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
        .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
        .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
        .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
        .wmi_vdev_oc_scheduler_air_time_limit =
                        WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
        .wds = WMI_10X_VDEV_PARAM_WDS,
        .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
        .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
        .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
        .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
        .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
        .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
        .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
        .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
        .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
        .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
        .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
        .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
        .sgi = WMI_10X_VDEV_PARAM_SGI,
        .ldpc = WMI_10X_VDEV_PARAM_LDPC,
        .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
        .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
        .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
        .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
        .nss = WMI_10X_VDEV_PARAM_NSS,
        .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
        .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
        .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
        .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
        .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
        .ap_keepalive_min_idle_inactive_time_secs =
                WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
        .ap_keepalive_max_idle_inactive_time_secs =
                WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
        .ap_keepalive_max_unresponsive_time_secs =
                WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
        .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
        .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
        .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
        .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
        .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
        .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
        .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
        .ap_detect_out_of_sync_sleeping_sta_time_secs =
                WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
};
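
/*
 * The pdev parameter maps translate radio-wide (physical device)
 * parameters set via the pdev-set-param command, again with one table
 * per firmware branch and WMI_PDEV_PARAM_UNSUPPORTED for parameters a
 * branch does not provide.
 */

/* MAIN WMI PDEV param map */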
static struct wmi_pdev_param_map wmi_pdev_param_map = {
        .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
        .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
        .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
        .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
        .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
        .beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
        .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
        .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
        .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
        .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
        .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
        .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
        .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
        .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
        .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
        .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
        .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
        .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
        .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
        .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
        .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
        .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
        .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
        .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
        .dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
        .pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
        .pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
        .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
        .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
        .pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
        .vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
        .peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
        .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
        .pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
        .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
        .dcs = WMI_PDEV_PARAM_DCS,
        .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
        .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
        .ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
        .ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
        .ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
        .dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
        .proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
        .idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
        .power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
        .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
        .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
        .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
        .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
};
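
/*
 * 10.X WMI PDEV param map.  The PCIe low-power TX buffer knobs are not
 * available in this branch, and arp_ac_override maps to the combined
 * ARP/DHCP override parameter.
 */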
static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
        .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
        .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
        .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
        .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
        .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
        .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
        .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
        .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
        .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
        .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
        .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
        .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
        .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
        .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
        .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
        .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
        .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
        .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
        .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
        .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
        .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
        .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
        .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
        .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
        .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
        .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
        .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
        .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
        .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
        .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
        .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
        .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
        .bcnflt_stats_update_period =
                        WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
        .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
        .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
        .dcs = WMI_10X_PDEV_PARAM_DCS,
        .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
        .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
        .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
        .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
        .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
        .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
        .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
        .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
        .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
        .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
        .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
        .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
        .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
};
  666. static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
  667. .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
  668. .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
  669. .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
  670. .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
  671. .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
  672. .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
  673. .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
  674. .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
  675. .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
  676. .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
  677. .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
  678. .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
  679. .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
  680. .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
  681. .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
  682. .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
  683. .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
  684. .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
  685. .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
  686. .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
  687. .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
  688. .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
  689. .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
  690. .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
  691. .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
  692. .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
  693. .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
  694. .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
  695. .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
  696. .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
  697. .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
  698. .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
  699. .bcnflt_stats_update_period =
  700. WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
  701. .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
  702. .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
  703. .dcs = WMI_10X_PDEV_PARAM_DCS,
  704. .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
  705. .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
  706. .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
  707. .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
  708. .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
  709. .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
  710. .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
  711. .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
  712. .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
  713. .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
  714. .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
  715. .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
  716. .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
  717. };
  718. /* firmware 10.2 specific mappings */
  719. static struct wmi_cmd_map wmi_10_2_cmd_map = {
  720. .init_cmdid = WMI_10_2_INIT_CMDID,
  721. .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
  722. .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
  723. .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
  724. .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
  725. .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
  726. .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
  727. .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
  728. .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
  729. .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
  730. .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
  731. .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
  732. .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
  733. .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
  734. .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
  735. .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
  736. .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
  737. .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
  738. .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
  739. .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
  740. .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
  741. .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
  742. .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
  743. .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
  744. .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
  745. .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
  746. .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
  747. .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
  748. .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
  749. .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
  750. .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
  751. .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
  752. .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
  753. .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
  754. .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
  755. .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
  756. .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
  757. .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
  758. .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
  759. .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
  760. .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
  761. .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
  762. .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
  763. .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
  764. .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
  765. .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
  766. .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
  767. .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
  768. .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
  769. .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
  770. .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
  771. .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
  772. .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
  773. .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
  774. .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
  775. .roam_scan_rssi_change_threshold =
  776. WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
  777. .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
  778. .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
  779. .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
  780. .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
  781. .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
  782. .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
  783. .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
  784. .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
  785. .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
  786. .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
  787. .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
  788. .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
  789. .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
  790. .wlan_profile_set_hist_intvl_cmdid =
  791. WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
  792. .wlan_profile_get_profile_data_cmdid =
  793. WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
  794. .wlan_profile_enable_profile_id_cmdid =
  795. WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
  796. .wlan_profile_list_profile_id_cmdid =
  797. WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
  798. .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
  799. .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
  800. .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
  801. .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
  802. .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
  803. .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
  804. .wow_enable_disable_wake_event_cmdid =
  805. WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
  806. .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
  807. .wow_hostwakeup_from_sleep_cmdid =
  808. WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
  809. .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
  810. .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
  811. .vdev_spectral_scan_configure_cmdid =
  812. WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
  813. .vdev_spectral_scan_enable_cmdid =
  814. WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
  815. .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
  816. .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
  817. .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
  818. .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
  819. .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
  820. .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
  821. .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
  822. .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
  823. .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
  824. .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
  825. .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
  826. .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
  827. .echo_cmdid = WMI_10_2_ECHO_CMDID,
  828. .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
  829. .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
  830. .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
  831. .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
  832. .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
  833. .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
  834. .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
  835. .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
  836. .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
  837. .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
  838. };
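/* Convert a host-order wmi_channel_arg into the little-endian wmi_channel
 * layout expected by firmware. Channel attribute flags (passive, IBSS,
 * HT/VHT, HT40+, DFS) are collected first and OR-ed into ch->flags, which
 * shares storage with ch->mode.
 */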
  839. void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
  840. const struct wmi_channel_arg *arg)
  841. {
  842. u32 flags = 0;
  843. memset(ch, 0, sizeof(*ch));
  844. if (arg->passive)
  845. flags |= WMI_CHAN_FLAG_PASSIVE;
  846. if (arg->allow_ibss)
  847. flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
  848. if (arg->allow_ht)
  849. flags |= WMI_CHAN_FLAG_ALLOW_HT;
  850. if (arg->allow_vht)
  851. flags |= WMI_CHAN_FLAG_ALLOW_VHT;
  852. if (arg->ht40plus)
  853. flags |= WMI_CHAN_FLAG_HT40_PLUS;
  854. if (arg->chan_radar)
  855. flags |= WMI_CHAN_FLAG_DFS;
  856. ch->mhz = __cpu_to_le32(arg->freq);
  857. ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
  858. ch->band_center_freq2 = 0;
  859. ch->min_power = arg->min_power;
  860. ch->max_power = arg->max_power;
  861. ch->reg_power = arg->max_reg_power;
  862. ch->antenna_max = arg->max_antenna_gain;
  863. /* mode & flags share storage */
  864. ch->mode = arg->mode;
  865. ch->flags |= __cpu_to_le32(flags);
  866. }
  867. int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
  868. {
  869. int ret;
  870. ret = wait_for_completion_timeout(&ar->wmi.service_ready,
  871. WMI_SERVICE_READY_TIMEOUT_HZ);
  872. return ret;
  873. }
  874. int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
  875. {
  876. int ret;
  877. ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
  878. WMI_UNIFIED_READY_TIMEOUT_HZ);
  879. return ret;
  880. }
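/* Allocate an HTC skb for a WMI command. The length is rounded up to a
 * 4-byte boundary, WMI_SKB_HEADROOM is reserved so the WMI command header
 * can later be pushed in front of the payload, and the payload is zeroed.
 */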
  881. struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
  882. {
  883. struct sk_buff *skb;
  884. u32 round_len = roundup(len, 4);
  885. skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
  886. if (!skb)
  887. return NULL;
  888. skb_reserve(skb, WMI_SKB_HEADROOM);
  889. if (!IS_ALIGNED((unsigned long)skb->data, 4))
  890. ath10k_warn(ar, "Unaligned WMI skb\n");
  891. skb_put(skb, round_len);
  892. memset(skb->data, 0, round_len);
  893. return skb;
  894. }
  895. static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
  896. {
  897. dev_kfree_skb(skb);
  898. }
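/* Prepend a wmi_cmd_hdr carrying cmd_id and queue the skb on the WMI HTC
 * endpoint without blocking. On failure the header is pulled back off so
 * the caller can retry with the same skb.
 */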
  899. int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
  900. u32 cmd_id)
  901. {
  902. struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
  903. struct wmi_cmd_hdr *cmd_hdr;
  904. int ret;
  905. u32 cmd = 0;
  906. if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
  907. return -ENOMEM;
  908. cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
  909. cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
  910. cmd_hdr->cmd_id = __cpu_to_le32(cmd);
  911. memset(skb_cb, 0, sizeof(*skb_cb));
  912. ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
  913. trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
  914. if (ret)
  915. goto err_pull;
  916. return 0;
  917. err_pull:
  918. skb_pull(skb, sizeof(struct wmi_cmd_hdr));
  919. return ret;
  920. }
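/* Submit a scheduled beacon for one vif by reference. The beacon state
 * machine under ar->data_lock prevents double submission; if the send fails
 * the beacon falls back to SCHEDULED and is retried once tx credits are
 * replenished.
 */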
  921. static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
  922. {
  923. struct ath10k *ar = arvif->ar;
  924. struct ath10k_skb_cb *cb;
  925. struct sk_buff *bcn;
  926. int ret;
  927. spin_lock_bh(&ar->data_lock);
  928. bcn = arvif->beacon;
  929. if (!bcn)
  930. goto unlock;
  931. cb = ATH10K_SKB_CB(bcn);
  932. switch (arvif->beacon_state) {
  933. case ATH10K_BEACON_SENDING:
  934. case ATH10K_BEACON_SENT:
  935. break;
  936. case ATH10K_BEACON_SCHEDULED:
  937. arvif->beacon_state = ATH10K_BEACON_SENDING;
  938. spin_unlock_bh(&ar->data_lock);
  939. ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
  940. arvif->vdev_id,
  941. bcn->data, bcn->len,
  942. cb->paddr,
  943. cb->bcn.dtim_zero,
  944. cb->bcn.deliver_cab);
  945. spin_lock_bh(&ar->data_lock);
  946. if (ret == 0)
  947. arvif->beacon_state = ATH10K_BEACON_SENT;
  948. else
  949. arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
  950. }
  951. unlock:
  952. spin_unlock_bh(&ar->data_lock);
  953. }
  954. static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
  955. struct ieee80211_vif *vif)
  956. {
  957. struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
  958. ath10k_wmi_tx_beacon_nowait(arvif);
  959. }
  960. static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
  961. {
  962. ieee80211_iterate_active_interfaces_atomic(ar->hw,
  963. IEEE80211_IFACE_ITER_NORMAL,
  964. ath10k_wmi_tx_beacons_iter,
  965. NULL);
  966. }
  967. static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
  968. {
  969. /* try to send pending beacons first. they take priority */
  970. ath10k_wmi_tx_beacons_nowait(ar);
  971. wake_up(&ar->wmi.tx_credits_wq);
  972. }
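/* Blocking WMI send used from process context. Commands the firmware
 * interface does not map are rejected with -EOPNOTSUPP. Otherwise the
 * function waits up to three seconds for HTC tx credits, retrying
 * ath10k_wmi_cmd_send_nowait() each time credits are replenished, and frees
 * the skb if the command still could not be submitted.
 */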
  973. int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
  974. {
  975. int ret = -EOPNOTSUPP;
  976. might_sleep();
  977. if (cmd_id == WMI_CMD_UNSUPPORTED) {
  978. ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
  979. cmd_id);
  980. return ret;
  981. }
  982. wait_event_timeout(ar->wmi.tx_credits_wq, ({
  983. /* try to send pending beacons first. they take priority */
  984. ath10k_wmi_tx_beacons_nowait(ar);
  985. ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
  986. if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
  987. ret = -ESHUTDOWN;
  988. (ret != -EAGAIN);
  989. }), 3*HZ);
  990. if (ret)
  991. dev_kfree_skb_any(skb);
  992. return ret;
  993. }
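/* Build a WMI_MGMT_TX command around an outgoing management frame.
 * Protected action/deauth/disassoc frames get IEEE80211_CCMP_MIC_LEN of
 * extra buffer space so firmware can append the MIC, and the command length
 * is padded to a 4-byte boundary.
 */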
  994. static struct sk_buff *
  995. ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
  996. {
  997. struct wmi_mgmt_tx_cmd *cmd;
  998. struct ieee80211_hdr *hdr;
  999. struct sk_buff *skb;
  1000. int len;
  1001. u32 buf_len = msdu->len;
  1002. u16 fc;
  1003. hdr = (struct ieee80211_hdr *)msdu->data;
  1004. fc = le16_to_cpu(hdr->frame_control);
  1005. if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
  1006. return ERR_PTR(-EINVAL);
  1007. len = sizeof(cmd->hdr) + msdu->len;
  1008. if ((ieee80211_is_action(hdr->frame_control) ||
  1009. ieee80211_is_deauth(hdr->frame_control) ||
  1010. ieee80211_is_disassoc(hdr->frame_control)) &&
  1011. ieee80211_has_protected(hdr->frame_control)) {
  1012. len += IEEE80211_CCMP_MIC_LEN;
  1013. buf_len += IEEE80211_CCMP_MIC_LEN;
  1014. }
  1015. len = round_up(len, 4);
  1016. skb = ath10k_wmi_alloc_skb(ar, len);
  1017. if (!skb)
  1018. return ERR_PTR(-ENOMEM);
  1019. cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
  1020. cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(msdu)->vdev_id);
  1021. cmd->hdr.tx_rate = 0;
  1022. cmd->hdr.tx_power = 0;
  1023. cmd->hdr.buf_len = __cpu_to_le32(buf_len);
  1024. ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
  1025. memcpy(cmd->buf, msdu->data, msdu->len);
  1026. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
  1027. msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
  1028. fc & IEEE80211_FCTL_STYPE);
  1029. trace_ath10k_tx_hdr(ar, skb->data, skb->len);
  1030. trace_ath10k_tx_payload(ar, skb->data, skb->len);
  1031. return skb;
  1032. }
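/* The scan event helpers below run with ar->data_lock held (see the lockdep
 * assertions) and advance ar->scan.state; events arriving in a state where
 * they are not expected are only logged.
 */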
  1033. static void ath10k_wmi_event_scan_started(struct ath10k *ar)
  1034. {
  1035. lockdep_assert_held(&ar->data_lock);
  1036. switch (ar->scan.state) {
  1037. case ATH10K_SCAN_IDLE:
  1038. case ATH10K_SCAN_RUNNING:
  1039. case ATH10K_SCAN_ABORTING:
  1040. ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
  1041. ath10k_scan_state_str(ar->scan.state),
  1042. ar->scan.state);
  1043. break;
  1044. case ATH10K_SCAN_STARTING:
  1045. ar->scan.state = ATH10K_SCAN_RUNNING;
  1046. if (ar->scan.is_roc)
  1047. ieee80211_ready_on_channel(ar->hw);
  1048. complete(&ar->scan.started);
  1049. break;
  1050. }
  1051. }
  1052. static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
  1053. {
  1054. lockdep_assert_held(&ar->data_lock);
  1055. switch (ar->scan.state) {
  1056. case ATH10K_SCAN_IDLE:
  1057. case ATH10K_SCAN_RUNNING:
  1058. case ATH10K_SCAN_ABORTING:
  1059. ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
  1060. ath10k_scan_state_str(ar->scan.state),
  1061. ar->scan.state);
  1062. break;
  1063. case ATH10K_SCAN_STARTING:
  1064. complete(&ar->scan.started);
  1065. __ath10k_scan_finish(ar);
  1066. break;
  1067. }
  1068. }
  1069. static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
  1070. {
  1071. lockdep_assert_held(&ar->data_lock);
  1072. switch (ar->scan.state) {
  1073. case ATH10K_SCAN_IDLE:
  1074. case ATH10K_SCAN_STARTING:
  1075. /* One suspected reason scan can be completed while starting is
  1076. * if firmware fails to deliver all scan events to the host,
  1077. * e.g. when transport pipe is full. This has been observed
  1078. * with spectral scan phyerr events starving wmi transport
  1079. * pipe. In such case the "scan completed" event should be (and
  1080. * is) ignored by the host as it may be just firmware's scan
  1081. * state machine recovering.
  1082. */
  1083. ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
  1084. ath10k_scan_state_str(ar->scan.state),
  1085. ar->scan.state);
  1086. break;
  1087. case ATH10K_SCAN_RUNNING:
  1088. case ATH10K_SCAN_ABORTING:
  1089. __ath10k_scan_finish(ar);
  1090. break;
  1091. }
  1092. }
  1093. static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
  1094. {
  1095. lockdep_assert_held(&ar->data_lock);
  1096. switch (ar->scan.state) {
  1097. case ATH10K_SCAN_IDLE:
  1098. case ATH10K_SCAN_STARTING:
  1099. ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
  1100. ath10k_scan_state_str(ar->scan.state),
  1101. ar->scan.state);
  1102. break;
  1103. case ATH10K_SCAN_RUNNING:
  1104. case ATH10K_SCAN_ABORTING:
  1105. ar->scan_channel = NULL;
  1106. break;
  1107. }
  1108. }
  1109. static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
  1110. {
  1111. lockdep_assert_held(&ar->data_lock);
  1112. switch (ar->scan.state) {
  1113. case ATH10K_SCAN_IDLE:
  1114. case ATH10K_SCAN_STARTING:
  1115. ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
  1116. ath10k_scan_state_str(ar->scan.state),
  1117. ar->scan.state);
  1118. break;
  1119. case ATH10K_SCAN_RUNNING:
  1120. case ATH10K_SCAN_ABORTING:
  1121. ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
  1122. if (ar->scan.is_roc && ar->scan.roc_freq == freq)
  1123. complete(&ar->scan.on_channel);
  1124. break;
  1125. }
  1126. }
  1127. static const char *
  1128. ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
  1129. enum wmi_scan_completion_reason reason)
  1130. {
  1131. switch (type) {
  1132. case WMI_SCAN_EVENT_STARTED:
  1133. return "started";
  1134. case WMI_SCAN_EVENT_COMPLETED:
  1135. switch (reason) {
  1136. case WMI_SCAN_REASON_COMPLETED:
  1137. return "completed";
  1138. case WMI_SCAN_REASON_CANCELLED:
  1139. return "completed [cancelled]";
  1140. case WMI_SCAN_REASON_PREEMPTED:
  1141. return "completed [preempted]";
  1142. case WMI_SCAN_REASON_TIMEDOUT:
  1143. return "completed [timedout]";
  1144. case WMI_SCAN_REASON_MAX:
  1145. break;
  1146. }
  1147. return "completed [unknown]";
  1148. case WMI_SCAN_EVENT_BSS_CHANNEL:
  1149. return "bss channel";
  1150. case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
  1151. return "foreign channel";
  1152. case WMI_SCAN_EVENT_DEQUEUED:
  1153. return "dequeued";
  1154. case WMI_SCAN_EVENT_PREEMPTED:
  1155. return "preempted";
  1156. case WMI_SCAN_EVENT_START_FAILED:
  1157. return "start failed";
  1158. default:
  1159. return "unknown";
  1160. }
  1161. }
  1162. static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
  1163. struct wmi_scan_ev_arg *arg)
  1164. {
  1165. struct wmi_scan_event *ev = (void *)skb->data;
  1166. if (skb->len < sizeof(*ev))
  1167. return -EPROTO;
  1168. skb_pull(skb, sizeof(*ev));
  1169. arg->event_type = ev->event_type;
  1170. arg->reason = ev->reason;
  1171. arg->channel_freq = ev->channel_freq;
  1172. arg->scan_req_id = ev->scan_req_id;
  1173. arg->scan_id = ev->scan_id;
  1174. arg->vdev_id = ev->vdev_id;
  1175. return 0;
  1176. }
  1177. int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
  1178. {
  1179. struct wmi_scan_ev_arg arg = {};
  1180. enum wmi_scan_event_type event_type;
  1181. enum wmi_scan_completion_reason reason;
  1182. u32 freq;
  1183. u32 req_id;
  1184. u32 scan_id;
  1185. u32 vdev_id;
  1186. int ret;
  1187. ret = ath10k_wmi_pull_scan(ar, skb, &arg);
  1188. if (ret) {
  1189. ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
  1190. return ret;
  1191. }
  1192. event_type = __le32_to_cpu(arg.event_type);
  1193. reason = __le32_to_cpu(arg.reason);
  1194. freq = __le32_to_cpu(arg.channel_freq);
  1195. req_id = __le32_to_cpu(arg.scan_req_id);
  1196. scan_id = __le32_to_cpu(arg.scan_id);
  1197. vdev_id = __le32_to_cpu(arg.vdev_id);
  1198. spin_lock_bh(&ar->data_lock);
  1199. ath10k_dbg(ar, ATH10K_DBG_WMI,
  1200. "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
  1201. ath10k_wmi_event_scan_type_str(event_type, reason),
  1202. event_type, reason, freq, req_id, scan_id, vdev_id,
  1203. ath10k_scan_state_str(ar->scan.state), ar->scan.state);
  1204. switch (event_type) {
  1205. case WMI_SCAN_EVENT_STARTED:
  1206. ath10k_wmi_event_scan_started(ar);
  1207. break;
  1208. case WMI_SCAN_EVENT_COMPLETED:
  1209. ath10k_wmi_event_scan_completed(ar);
  1210. break;
  1211. case WMI_SCAN_EVENT_BSS_CHANNEL:
  1212. ath10k_wmi_event_scan_bss_chan(ar);
  1213. break;
  1214. case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
  1215. ath10k_wmi_event_scan_foreign_chan(ar, freq);
  1216. break;
  1217. case WMI_SCAN_EVENT_START_FAILED:
  1218. ath10k_warn(ar, "received scan start failure event\n");
  1219. ath10k_wmi_event_scan_start_failed(ar);
  1220. break;
  1221. case WMI_SCAN_EVENT_DEQUEUED:
  1222. case WMI_SCAN_EVENT_PREEMPTED:
  1223. default:
  1224. break;
  1225. }
  1226. spin_unlock_bh(&ar->data_lock);
  1227. return 0;
  1228. }
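/* Map a firmware PHY mode to an ieee80211_band. Unknown and 2 GHz specific
 * modes fall back to IEEE80211_BAND_2GHZ.
 */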
  1229. static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
  1230. {
  1231. enum ieee80211_band band;
  1232. switch (phy_mode) {
  1233. case MODE_11A:
  1234. case MODE_11NA_HT20:
  1235. case MODE_11NA_HT40:
  1236. case MODE_11AC_VHT20:
  1237. case MODE_11AC_VHT40:
  1238. case MODE_11AC_VHT80:
  1239. band = IEEE80211_BAND_5GHZ;
  1240. break;
  1241. case MODE_11G:
  1242. case MODE_11B:
  1243. case MODE_11GONLY:
  1244. case MODE_11NG_HT20:
  1245. case MODE_11NG_HT40:
  1246. case MODE_11AC_VHT20_2G:
  1247. case MODE_11AC_VHT40_2G:
  1248. case MODE_11AC_VHT80_2G:
  1249. default:
  1250. band = IEEE80211_BAND_2GHZ;
  1251. }
  1252. return band;
  1253. }
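/* Convert a legacy rate reported in kbps to a mac80211 bitrate table index.
 * The 5 GHz band does not advertise the four CCK entries, so the index is
 * shifted down by four there (CCK rates themselves map to index 0).
 */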
  1254. static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
  1255. {
  1256. u8 rate_idx = 0;
  1257. /* rate in Kbps */
  1258. switch (rate) {
  1259. case 1000:
  1260. rate_idx = 0;
  1261. break;
  1262. case 2000:
  1263. rate_idx = 1;
  1264. break;
  1265. case 5500:
  1266. rate_idx = 2;
  1267. break;
  1268. case 11000:
  1269. rate_idx = 3;
  1270. break;
  1271. case 6000:
  1272. rate_idx = 4;
  1273. break;
  1274. case 9000:
  1275. rate_idx = 5;
  1276. break;
  1277. case 12000:
  1278. rate_idx = 6;
  1279. break;
  1280. case 18000:
  1281. rate_idx = 7;
  1282. break;
  1283. case 24000:
  1284. rate_idx = 8;
  1285. break;
  1286. case 36000:
  1287. rate_idx = 9;
  1288. break;
  1289. case 48000:
  1290. rate_idx = 10;
  1291. break;
  1292. case 54000:
  1293. rate_idx = 11;
  1294. break;
  1295. default:
  1296. break;
  1297. }
  1298. if (band == IEEE80211_BAND_5GHZ) {
  1299. if (rate_idx > 3)
  1300. /* Omit CCK rates */
  1301. rate_idx -= 4;
  1302. else
  1303. rate_idx = 0;
  1304. }
  1305. return rate_idx;
  1306. }
  1307. /* If keys are configured, HW decrypts all frames
  1308. * with protected bit set. Mark such frames as decrypted.
  1309. */
  1310. static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
  1311. struct sk_buff *skb,
  1312. struct ieee80211_rx_status *status)
  1313. {
  1314. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  1315. unsigned int hdrlen;
  1316. bool peer_key;
  1317. u8 *addr, keyidx;
  1318. if (!ieee80211_is_auth(hdr->frame_control) ||
  1319. !ieee80211_has_protected(hdr->frame_control))
  1320. return;
  1321. hdrlen = ieee80211_hdrlen(hdr->frame_control);
  1322. if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
  1323. return;
  1324. keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
  1325. addr = ieee80211_get_SA(hdr);
  1326. spin_lock_bh(&ar->data_lock);
  1327. peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
  1328. spin_unlock_bh(&ar->data_lock);
  1329. if (peer_key) {
  1330. ath10k_dbg(ar, ATH10K_DBG_MAC,
  1331. "mac wep key present for peer %pM\n", addr);
  1332. status->flag |= RX_FLAG_DECRYPTED;
  1333. }
  1334. }
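/* Parse a management rx event. Firmware advertising EXT_WMI_MGMT_RX uses the
 * larger v2 header; both variants share the v1 fields read here. The skb is
 * trimmed to the reported buffer length to drop any HTC trailer padding.
 */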
  1335. static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
  1336. struct wmi_mgmt_rx_ev_arg *arg)
  1337. {
  1338. struct wmi_mgmt_rx_event_v1 *ev_v1;
  1339. struct wmi_mgmt_rx_event_v2 *ev_v2;
  1340. struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
  1341. size_t pull_len;
  1342. u32 msdu_len;
  1343. if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
  1344. ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
  1345. ev_hdr = &ev_v2->hdr.v1;
  1346. pull_len = sizeof(*ev_v2);
  1347. } else {
  1348. ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
  1349. ev_hdr = &ev_v1->hdr;
  1350. pull_len = sizeof(*ev_v1);
  1351. }
  1352. if (skb->len < pull_len)
  1353. return -EPROTO;
  1354. skb_pull(skb, pull_len);
  1355. arg->channel = ev_hdr->channel;
  1356. arg->buf_len = ev_hdr->buf_len;
  1357. arg->status = ev_hdr->status;
  1358. arg->snr = ev_hdr->snr;
  1359. arg->phy_mode = ev_hdr->phy_mode;
  1360. arg->rate = ev_hdr->rate;
  1361. msdu_len = __le32_to_cpu(arg->buf_len);
  1362. if (skb->len < msdu_len)
  1363. return -EPROTO;
  1364. /* the WMI buffer might've ended up being padded to 4 bytes due to HTC
  1365. * trailer with credit update. Trim the excess garbage.
  1366. */
  1367. skb_trim(skb, msdu_len);
  1368. return 0;
  1369. }
  1370. int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
  1371. {
  1372. struct wmi_mgmt_rx_ev_arg arg = {};
  1373. struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  1374. struct ieee80211_hdr *hdr;
  1375. u32 rx_status;
  1376. u32 channel;
  1377. u32 phy_mode;
  1378. u32 snr;
  1379. u32 rate;
  1380. u32 buf_len;
  1381. u16 fc;
  1382. int ret;
  1383. ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
  1384. if (ret) {
  1385. ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
  1386. return ret;
  1387. }
  1388. channel = __le32_to_cpu(arg.channel);
  1389. buf_len = __le32_to_cpu(arg.buf_len);
  1390. rx_status = __le32_to_cpu(arg.status);
  1391. snr = __le32_to_cpu(arg.snr);
  1392. phy_mode = __le32_to_cpu(arg.phy_mode);
  1393. rate = __le32_to_cpu(arg.rate);
  1394. memset(status, 0, sizeof(*status));
  1395. ath10k_dbg(ar, ATH10K_DBG_MGMT,
  1396. "event mgmt rx status %08x\n", rx_status);
  1397. if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
  1398. dev_kfree_skb(skb);
  1399. return 0;
  1400. }
  1401. if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
  1402. dev_kfree_skb(skb);
  1403. return 0;
  1404. }
  1405. if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
  1406. dev_kfree_skb(skb);
  1407. return 0;
  1408. }
  1409. if (rx_status & WMI_RX_STATUS_ERR_CRC) {
  1410. dev_kfree_skb(skb);
  1411. return 0;
  1412. }
  1413. if (rx_status & WMI_RX_STATUS_ERR_MIC)
  1414. status->flag |= RX_FLAG_MMIC_ERROR;
  1415. /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
  1416. * MODE_11B. This means phy_mode is not a reliable source for the band
  1417. * of mgmt rx.
  1418. */
  1419. if (channel >= 1 && channel <= 14) {
  1420. status->band = IEEE80211_BAND_2GHZ;
  1421. } else if (channel >= 36 && channel <= 165) {
  1422. status->band = IEEE80211_BAND_5GHZ;
  1423. } else {
  1424. /* Shouldn't happen unless list of advertised channels to
  1425. * mac80211 has been changed.
  1426. */
  1427. WARN_ON_ONCE(1);
  1428. dev_kfree_skb(skb);
  1429. return 0;
  1430. }
  1431. if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
  1432. ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
  1433. status->freq = ieee80211_channel_to_frequency(channel, status->band);
  1434. status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
  1435. status->rate_idx = get_rate_idx(rate, status->band);
  1436. hdr = (struct ieee80211_hdr *)skb->data;
  1437. fc = le16_to_cpu(hdr->frame_control);
  1438. ath10k_wmi_handle_wep_reauth(ar, skb, status);
  1439. /* FW delivers WEP Shared Auth frame with Protected Bit set and
  1440. * encrypted payload. However in case of PMF it delivers decrypted
  1441. * frames with Protected Bit set. */
  1442. if (ieee80211_has_protected(hdr->frame_control) &&
  1443. !ieee80211_is_auth(hdr->frame_control)) {
  1444. status->flag |= RX_FLAG_DECRYPTED;
  1445. if (!ieee80211_is_action(hdr->frame_control) &&
  1446. !ieee80211_is_deauth(hdr->frame_control) &&
  1447. !ieee80211_is_disassoc(hdr->frame_control)) {
  1448. status->flag |= RX_FLAG_IV_STRIPPED |
  1449. RX_FLAG_MMIC_STRIPPED;
  1450. hdr->frame_control = __cpu_to_le16(fc &
  1451. ~IEEE80211_FCTL_PROTECTED);
  1452. }
  1453. }
  1454. ath10k_dbg(ar, ATH10K_DBG_MGMT,
  1455. "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
  1456. skb, skb->len,
  1457. fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
  1458. ath10k_dbg(ar, ATH10K_DBG_MGMT,
  1459. "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
  1460. status->freq, status->band, status->signal,
  1461. status->rate_idx);
  1462. ieee80211_rx(ar->hw, skb);
  1463. return 0;
  1464. }
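/* Translate a channel center frequency into the flat index used for
 * ar->survey[], counting the channels of all registered bands in order.
 */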
  1465. static int freq_to_idx(struct ath10k *ar, int freq)
  1466. {
  1467. struct ieee80211_supported_band *sband;
  1468. int band, ch, idx = 0;
  1469. for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
  1470. sband = ar->hw->wiphy->bands[band];
  1471. if (!sband)
  1472. continue;
  1473. for (ch = 0; ch < sband->n_channels; ch++, idx++)
  1474. if (sband->channels[ch].center_freq == freq)
  1475. goto exit;
  1476. }
  1477. exit:
  1478. return idx;
  1479. }
  1480. static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
  1481. struct wmi_ch_info_ev_arg *arg)
  1482. {
  1483. struct wmi_chan_info_event *ev = (void *)skb->data;
  1484. if (skb->len < sizeof(*ev))
  1485. return -EPROTO;
  1486. skb_pull(skb, sizeof(*ev));
  1487. arg->err_code = ev->err_code;
  1488. arg->freq = ev->freq;
  1489. arg->cmd_flags = ev->cmd_flags;
  1490. arg->noise_floor = ev->noise_floor;
  1491. arg->rx_clear_count = ev->rx_clear_count;
  1492. arg->cycle_count = ev->cycle_count;
  1493. return 0;
  1494. }
  1495. void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
  1496. {
  1497. struct wmi_ch_info_ev_arg arg = {};
  1498. struct survey_info *survey;
  1499. u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
  1500. int idx, ret;
  1501. ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
  1502. if (ret) {
  1503. ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
  1504. return;
  1505. }
  1506. err_code = __le32_to_cpu(arg.err_code);
  1507. freq = __le32_to_cpu(arg.freq);
  1508. cmd_flags = __le32_to_cpu(arg.cmd_flags);
  1509. noise_floor = __le32_to_cpu(arg.noise_floor);
  1510. rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
  1511. cycle_count = __le32_to_cpu(arg.cycle_count);
  1512. ath10k_dbg(ar, ATH10K_DBG_WMI,
  1513. "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
  1514. err_code, freq, cmd_flags, noise_floor, rx_clear_count,
  1515. cycle_count);
  1516. spin_lock_bh(&ar->data_lock);
  1517. switch (ar->scan.state) {
  1518. case ATH10K_SCAN_IDLE:
  1519. case ATH10K_SCAN_STARTING:
  1520. ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
  1521. goto exit;
  1522. case ATH10K_SCAN_RUNNING:
  1523. case ATH10K_SCAN_ABORTING:
  1524. break;
  1525. }
  1526. idx = freq_to_idx(ar, freq);
  1527. if (idx >= ARRAY_SIZE(ar->survey)) {
  1528. ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
  1529. freq, idx);
  1530. goto exit;
  1531. }
  1532. if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
  1533. /* During scanning chan info is reported twice for each
  1534. * visited channel. The reported cycle count is global
  1535. * and per-channel cycle count must be calculated */
  1536. cycle_count -= ar->survey_last_cycle_count;
  1537. rx_clear_count -= ar->survey_last_rx_clear_count;
  1538. survey = &ar->survey[idx];
  1539. survey->time = WMI_CHAN_INFO_MSEC(cycle_count);
  1540. survey->time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
  1541. survey->noise = noise_floor;
  1542. survey->filled = SURVEY_INFO_TIME |
  1543. SURVEY_INFO_TIME_RX |
  1544. SURVEY_INFO_NOISE_DBM;
  1545. }
  1546. ar->survey_last_rx_clear_count = rx_clear_count;
  1547. ar->survey_last_cycle_count = cycle_count;
  1548. exit:
  1549. spin_unlock_bh(&ar->data_lock);
  1550. }
  1551. void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
  1552. {
  1553. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
  1554. }
  1555. int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
  1556. {
  1557. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
  1558. skb->len);
  1559. trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
  1560. return 0;
  1561. }
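/* The helpers below convert the little-endian firmware statistics structures
 * into the host-order ath10k_fw_stats_* representations consumed by the
 * fw stats (debugfs) code.
 */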
  1562. void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
  1563. struct ath10k_fw_stats_pdev *dst)
  1564. {
  1565. dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
  1566. dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
  1567. dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
  1568. dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
  1569. dst->cycle_count = __le32_to_cpu(src->cycle_count);
  1570. dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
  1571. dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
  1572. }
  1573. void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
  1574. struct ath10k_fw_stats_pdev *dst)
  1575. {
  1576. dst->comp_queued = __le32_to_cpu(src->comp_queued);
  1577. dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
  1578. dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
  1579. dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
  1580. dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
  1581. dst->local_enqued = __le32_to_cpu(src->local_enqued);
  1582. dst->local_freed = __le32_to_cpu(src->local_freed);
  1583. dst->hw_queued = __le32_to_cpu(src->hw_queued);
  1584. dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
  1585. dst->underrun = __le32_to_cpu(src->underrun);
  1586. dst->tx_abort = __le32_to_cpu(src->tx_abort);
  1587. dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
  1588. dst->tx_ko = __le32_to_cpu(src->tx_ko);
  1589. dst->data_rc = __le32_to_cpu(src->data_rc);
  1590. dst->self_triggers = __le32_to_cpu(src->self_triggers);
  1591. dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
  1592. dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
  1593. dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
  1594. dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
  1595. dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
  1596. dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
  1597. dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
  1598. }
  1599. void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
  1600. struct ath10k_fw_stats_pdev *dst)
  1601. {
  1602. dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
  1603. dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
  1604. dst->r0_frags = __le32_to_cpu(src->r0_frags);
  1605. dst->r1_frags = __le32_to_cpu(src->r1_frags);
  1606. dst->r2_frags = __le32_to_cpu(src->r2_frags);
  1607. dst->r3_frags = __le32_to_cpu(src->r3_frags);
  1608. dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
  1609. dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
  1610. dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
  1611. dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
  1612. dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
  1613. dst->phy_errs = __le32_to_cpu(src->phy_errs);
  1614. dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
  1615. dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
  1616. }
  1617. void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
  1618. struct ath10k_fw_stats_pdev *dst)
  1619. {
  1620. dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
  1621. dst->rts_bad = __le32_to_cpu(src->rts_bad);
  1622. dst->rts_good = __le32_to_cpu(src->rts_good);
  1623. dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
  1624. dst->no_beacons = __le32_to_cpu(src->no_beacons);
  1625. dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
  1626. }
  1627. void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
  1628. struct ath10k_fw_stats_peer *dst)
  1629. {
  1630. ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
  1631. dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
  1632. dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
  1633. }
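/* Pull a WMI stats event for "main" firmware: walk the advertised pdev and
 * peer records (vdev stats are not implemented by the firmware) and queue
 * them on the caller-supplied ath10k_fw_stats lists.
 */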
  1634. static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
  1635. struct sk_buff *skb,
  1636. struct ath10k_fw_stats *stats)
  1637. {
  1638. const struct wmi_stats_event *ev = (void *)skb->data;
  1639. u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
  1640. int i;
  1641. if (!skb_pull(skb, sizeof(*ev)))
  1642. return -EPROTO;
  1643. num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
  1644. num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
  1645. num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
  1646. for (i = 0; i < num_pdev_stats; i++) {
  1647. const struct wmi_pdev_stats *src;
  1648. struct ath10k_fw_stats_pdev *dst;
  1649. src = (void *)skb->data;
  1650. if (!skb_pull(skb, sizeof(*src)))
  1651. return -EPROTO;
  1652. dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  1653. if (!dst)
  1654. continue;
  1655. ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
  1656. ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
  1657. ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
  1658. list_add_tail(&dst->list, &stats->pdevs);
  1659. }
  1660. /* fw doesn't implement vdev stats */
  1661. for (i = 0; i < num_peer_stats; i++) {
  1662. const struct wmi_peer_stats *src;
  1663. struct ath10k_fw_stats_peer *dst;
  1664. src = (void *)skb->data;
  1665. if (!skb_pull(skb, sizeof(*src)))
  1666. return -EPROTO;
  1667. dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  1668. if (!dst)
  1669. continue;
  1670. ath10k_wmi_pull_peer_stats(src, dst);
  1671. list_add_tail(&dst->list, &stats->peers);
  1672. }
  1673. return 0;
  1674. }
  1675. static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
  1676. struct sk_buff *skb,
  1677. struct ath10k_fw_stats *stats)
  1678. {
  1679. const struct wmi_stats_event *ev = (void *)skb->data;
  1680. u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
  1681. int i;
  1682. if (!skb_pull(skb, sizeof(*ev)))
  1683. return -EPROTO;
  1684. num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
  1685. num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
  1686. num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
  1687. for (i = 0; i < num_pdev_stats; i++) {
  1688. const struct wmi_10x_pdev_stats *src;
  1689. struct ath10k_fw_stats_pdev *dst;
  1690. src = (void *)skb->data;
  1691. if (!skb_pull(skb, sizeof(*src)))
  1692. return -EPROTO;
  1693. dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  1694. if (!dst)
  1695. continue;
  1696. ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
  1697. ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
  1698. ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
  1699. ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
  1700. list_add_tail(&dst->list, &stats->pdevs);
  1701. }
  1702. /* fw doesn't implement vdev stats */
  1703. for (i = 0; i < num_peer_stats; i++) {
  1704. const struct wmi_10x_peer_stats *src;
  1705. struct ath10k_fw_stats_peer *dst;
  1706. src = (void *)skb->data;
  1707. if (!skb_pull(skb, sizeof(*src)))
  1708. return -EPROTO;
  1709. dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  1710. if (!dst)
  1711. continue;
  1712. ath10k_wmi_pull_peer_stats(&src->old, dst);
  1713. dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
  1714. list_add_tail(&dst->list, &stats->peers);
  1715. }
  1716. return 0;
  1717. }
  1718. static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
  1719. struct sk_buff *skb,
  1720. struct ath10k_fw_stats *stats)
  1721. {
  1722. const struct wmi_10_2_stats_event *ev = (void *)skb->data;
  1723. u32 num_pdev_stats;
  1724. u32 num_pdev_ext_stats;
  1725. u32 num_vdev_stats;
  1726. u32 num_peer_stats;
  1727. int i;
  1728. if (!skb_pull(skb, sizeof(*ev)))
  1729. return -EPROTO;
  1730. num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
  1731. num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
  1732. num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
  1733. num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
  1734. for (i = 0; i < num_pdev_stats; i++) {
  1735. const struct wmi_10_2_pdev_stats *src;
  1736. struct ath10k_fw_stats_pdev *dst;
  1737. src = (void *)skb->data;
  1738. if (!skb_pull(skb, sizeof(*src)))
  1739. return -EPROTO;
  1740. dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  1741. if (!dst)
  1742. continue;
  1743. ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
  1744. ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
  1745. ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
  1746. ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
  1747. /* FIXME: expose 10.2 specific values */
  1748. list_add_tail(&dst->list, &stats->pdevs);
  1749. }
  1750. for (i = 0; i < num_pdev_ext_stats; i++) {
  1751. const struct wmi_10_2_pdev_ext_stats *src;
  1752. src = (void *)skb->data;
  1753. if (!skb_pull(skb, sizeof(*src)))
  1754. return -EPROTO;
  1755. /* FIXME: expose values to userspace
  1756. *
  1757. * Note: Even though this loop seems to do nothing it is
  1758. * required to parse following sub-structures properly.
  1759. */
  1760. }
  1761. /* fw doesn't implement vdev stats */
  1762. for (i = 0; i < num_peer_stats; i++) {
  1763. const struct wmi_10_2_peer_stats *src;
  1764. struct ath10k_fw_stats_peer *dst;
  1765. src = (void *)skb->data;
  1766. if (!skb_pull(skb, sizeof(*src)))
  1767. return -EPROTO;
  1768. dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  1769. if (!dst)
  1770. continue;
  1771. ath10k_wmi_pull_peer_stats(&src->old, dst);
  1772. dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
  1773. /* FIXME: expose 10.2 specific values */
  1774. list_add_tail(&dst->list, &stats->peers);
  1775. }
  1776. return 0;
  1777. }
  1778. static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
  1779. struct sk_buff *skb,
  1780. struct ath10k_fw_stats *stats)
  1781. {
  1782. const struct wmi_10_2_stats_event *ev = (void *)skb->data;
  1783. u32 num_pdev_stats;
  1784. u32 num_pdev_ext_stats;
  1785. u32 num_vdev_stats;
  1786. u32 num_peer_stats;
  1787. int i;
  1788. if (!skb_pull(skb, sizeof(*ev)))
  1789. return -EPROTO;
  1790. num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
  1791. num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
  1792. num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
  1793. num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
  1794. for (i = 0; i < num_pdev_stats; i++) {
  1795. const struct wmi_10_2_pdev_stats *src;
  1796. struct ath10k_fw_stats_pdev *dst;
  1797. src = (void *)skb->data;
  1798. if (!skb_pull(skb, sizeof(*src)))
  1799. return -EPROTO;
  1800. dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  1801. if (!dst)
  1802. continue;
  1803. ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
  1804. ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
  1805. ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
  1806. ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
  1807. /* FIXME: expose 10.2 specific values */
  1808. list_add_tail(&dst->list, &stats->pdevs);
  1809. }
  1810. for (i = 0; i < num_pdev_ext_stats; i++) {
  1811. const struct wmi_10_2_pdev_ext_stats *src;
  1812. src = (void *)skb->data;
  1813. if (!skb_pull(skb, sizeof(*src)))
  1814. return -EPROTO;
  1815. /* FIXME: expose values to userspace
  1816. *
  1817. * Note: Even though this loop seems to do nothing it is
  1818. * required to parse following sub-structures properly.
  1819. */
  1820. }
  1821. /* fw doesn't implement vdev stats */
  1822. for (i = 0; i < num_peer_stats; i++) {
  1823. const struct wmi_10_2_4_peer_stats *src;
  1824. struct ath10k_fw_stats_peer *dst;
  1825. src = (void *)skb->data;
  1826. if (!skb_pull(skb, sizeof(*src)))
  1827. return -EPROTO;
  1828. dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  1829. if (!dst)
  1830. continue;
  1831. ath10k_wmi_pull_peer_stats(&src->common.old, dst);
  1832. dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
  1833. /* FIXME: expose 10.2 specific values */
  1834. list_add_tail(&dst->list, &stats->peers);
  1835. }
  1836. return 0;
  1837. }
  1838. void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
  1839. {
  1840. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
  1841. ath10k_debug_fw_stats_process(ar, skb);
  1842. }
  1843. static int
  1844. ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
  1845. struct wmi_vdev_start_ev_arg *arg)
  1846. {
  1847. struct wmi_vdev_start_response_event *ev = (void *)skb->data;
  1848. if (skb->len < sizeof(*ev))
  1849. return -EPROTO;
  1850. skb_pull(skb, sizeof(*ev));
  1851. arg->vdev_id = ev->vdev_id;
  1852. arg->req_id = ev->req_id;
  1853. arg->resp_type = ev->resp_type;
  1854. arg->status = ev->status;
  1855. return 0;
  1856. }
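/* vdev start/stop completion handlers: parse the start response and complete
 * ar->vdev_setup_done; a non-zero start status only triggers a warning and
 * leaves the waiter to time out.
 */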
  1857. void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
  1858. {
  1859. struct wmi_vdev_start_ev_arg arg = {};
  1860. int ret;
  1861. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
  1862. ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
  1863. if (ret) {
  1864. ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
  1865. return;
  1866. }
  1867. if (WARN_ON(__le32_to_cpu(arg.status)))
  1868. return;
  1869. complete(&ar->vdev_setup_done);
  1870. }
  1871. void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
  1872. {
  1873. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
  1874. complete(&ar->vdev_setup_done);
  1875. }
  1876. static int
  1877. ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
  1878. struct wmi_peer_kick_ev_arg *arg)
  1879. {
  1880. struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
  1881. if (skb->len < sizeof(*ev))
  1882. return -EPROTO;
  1883. skb_pull(skb, sizeof(*ev));
  1884. arg->mac_addr = ev->peer_macaddr.addr;
  1885. return 0;
  1886. }
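/* Firmware decided to kick a peer out: look the station up by MAC address
 * and report a low-ack event to mac80211.
 */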
  1887. void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
  1888. {
  1889. struct wmi_peer_kick_ev_arg arg = {};
  1890. struct ieee80211_sta *sta;
  1891. int ret;
  1892. ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
  1893. if (ret) {
  1894. ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
  1895. ret);
  1896. return;
  1897. }
  1898. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
  1899. arg.mac_addr);
  1900. rcu_read_lock();
  1901. sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
  1902. if (!sta) {
  1903. ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
  1904. arg.mac_addr);
  1905. goto exit;
  1906. }
  1907. ieee80211_report_low_ack(sta, 10);
  1908. exit:
  1909. rcu_read_unlock();
  1910. }
  1911. /*
  1912. * FIXME
  1913. *
  1914. * We don't report to mac80211 sleep state of connected
  1915. * stations. Due to this mac80211 can't fill in TIM IE
  1916. * correctly.
  1917. *
  1918. * I know of no way of getting nullfunc frames that contain
  1919. * sleep transition from connected stations - these do not
  1920. * seem to be sent from the target to the host. There also
  1921. * doesn't seem to be a dedicated event for that. So the
  1922. * only way left to do this would be to read tim_bitmap
  1923. * during SWBA.
  1924. *
  1925. * We could probably try using tim_bitmap from SWBA to tell
  1926. * mac80211 which stations are asleep and which are not. The
  1927. * problem here is calling mac80211 functions so many times
  1928. * could take too long and make us miss the time to submit
  1929. * the beacon to the target.
  1930. *
  1931. * So as a workaround we try to extend the TIM IE if there
  1932. * is unicast buffered for stations with aid > 7 and fill it
  1933. * in ourselves.
  1934. */
  1935. static void ath10k_wmi_update_tim(struct ath10k *ar,
  1936. struct ath10k_vif *arvif,
  1937. struct sk_buff *bcn,
  1938. const struct wmi_tim_info *tim_info)
  1939. {
  1940. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
  1941. struct ieee80211_tim_ie *tim;
  1942. u8 *ies, *ie;
  1943. u8 ie_len, pvm_len;
  1944. __le32 t;
  1945. u32 v;
  1946. /* if next SWBA has no tim_changed the tim_bitmap is garbage.
  1947. * we must copy the bitmap upon change and reuse it later */
  1948. if (__le32_to_cpu(tim_info->tim_changed)) {
  1949. int i;
  1950. BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
  1951. sizeof(tim_info->tim_bitmap));
  1952. for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
  1953. t = tim_info->tim_bitmap[i / 4];
  1954. v = __le32_to_cpu(t);
  1955. arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
  1956. }
  1957. /* FW reports either length 0 or 16
  1958. * so we calculate this on our own */
  1959. arvif->u.ap.tim_len = 0;
  1960. for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
  1961. if (arvif->u.ap.tim_bitmap[i])
  1962. arvif->u.ap.tim_len = i;
  1963. arvif->u.ap.tim_len++;
  1964. }
  1965. ies = bcn->data;
  1966. ies += ieee80211_hdrlen(hdr->frame_control);
  1967. ies += 12; /* fixed parameters */
  1968. ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
  1969. (u8 *)skb_tail_pointer(bcn) - ies);
  1970. if (!ie) {
  1971. if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
  1972. ath10k_warn(ar, "no tim ie found;\n");
  1973. return;
  1974. }
  1975. tim = (void *)ie + 2;
  1976. ie_len = ie[1];
  1977. pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
  1978. if (pvm_len < arvif->u.ap.tim_len) {
  1979. int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
  1980. int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
  1981. void *next_ie = ie + 2 + ie_len;
  1982. if (skb_put(bcn, expand_size)) {
  1983. memmove(next_ie + expand_size, next_ie, move_size);
  1984. ie[1] += expand_size;
  1985. ie_len += expand_size;
  1986. pvm_len += expand_size;
  1987. } else {
  1988. ath10k_warn(ar, "tim expansion failed\n");
  1989. }
  1990. }
  1991. if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
  1992. ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
  1993. return;
  1994. }
  1995. tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
  1996. memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
  1997. if (tim->dtim_count == 0) {
  1998. ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
  1999. if (__le32_to_cpu(tim_info->tim_mcast) == 1)
  2000. ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
  2001. }
  2002. ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
  2003. tim->dtim_count, tim->dtim_period,
  2004. tim->bitmap_ctrl, pvm_len);
  2005. }
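/* Serialise firmware NoA state into a P2P vendor IE: the WFA OUI header is
 * followed by a Notice of Absence attribute carrying the CT window /
 * opportunistic power save byte and one entry per firmware NoA descriptor.
 */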
  2006. static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
  2007. const struct wmi_p2p_noa_info *noa)
  2008. {
  2009. struct ieee80211_p2p_noa_attr *noa_attr;
  2010. u8 ctwindow_oppps = noa->ctwindow_oppps;
  2011. u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
  2012. bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
  2013. __le16 *noa_attr_len;
  2014. u16 attr_len;
  2015. u8 noa_descriptors = noa->num_descriptors;
  2016. int i;
  2017. /* P2P IE */
  2018. data[0] = WLAN_EID_VENDOR_SPECIFIC;
  2019. data[1] = len - 2;
  2020. data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
  2021. data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
  2022. data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
  2023. data[5] = WLAN_OUI_TYPE_WFA_P2P;
  2024. /* NOA ATTR */
  2025. data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
  2026. noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
  2027. noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
  2028. noa_attr->index = noa->index;
  2029. noa_attr->oppps_ctwindow = ctwindow;
  2030. if (oppps)
  2031. noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
  2032. for (i = 0; i < noa_descriptors; i++) {
  2033. noa_attr->desc[i].count =
  2034. __le32_to_cpu(noa->descriptors[i].type_count);
  2035. noa_attr->desc[i].duration = noa->descriptors[i].duration;
  2036. noa_attr->desc[i].interval = noa->descriptors[i].interval;
  2037. noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
  2038. }
  2039. attr_len = 2; /* index + oppps_ctwindow */
  2040. attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
  2041. *noa_attr_len = __cpu_to_le16(attr_len);
  2042. }
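/* Length of the NoA IE built above: EID + length byte + OUI + type,
 * attribute id + 16-bit length, index + CTWindow byte, plus one
 * ieee80211_p2p_noa_desc per descriptor. Returns 0 when there are no
 * descriptors and opportunistic PS is disabled, i.e. nothing to advertise.
 */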
  2043. static u32 ath10k_p2p_calc_noa_ie_len(const struct wmi_p2p_noa_info *noa)
  2044. {
  2045. u32 len = 0;
  2046. u8 noa_descriptors = noa->num_descriptors;
  2047. u8 opp_ps_info = noa->ctwindow_oppps;
  2048. bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
  2049. if (!noa_descriptors && !opps_enabled)
  2050. return len;
  2051. len += 1 + 1 + 4; /* EID + len + OUI */
  2052. len += 1 + 2; /* noa attr + attr len */
  2053. len += 1 + 1; /* index + oppps_ctwindow */
  2054. len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
  2055. return len;
  2056. }
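/* P2P GO only: when the firmware flags a NoA change, rebuild the cached NoA
 * IE and swap it in under data_lock so concurrent users never see a
 * half-updated buffer, then append the cached IE to the beacon template.
 */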
  2057. static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
  2058. struct sk_buff *bcn,
  2059. const struct wmi_p2p_noa_info *noa)
  2060. {
  2061. u8 *new_data, *old_data = arvif->u.ap.noa_data;
  2062. u32 new_len;
  2063. if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
  2064. return;
  2065. ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
  2066. if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
  2067. new_len = ath10k_p2p_calc_noa_ie_len(noa);
  2068. if (!new_len)
  2069. goto cleanup;
  2070. new_data = kmalloc(new_len, GFP_ATOMIC);
  2071. if (!new_data)
  2072. goto cleanup;
  2073. ath10k_p2p_fill_noa_ie(new_data, new_len, noa);
  2074. spin_lock_bh(&ar->data_lock);
  2075. arvif->u.ap.noa_data = new_data;
  2076. arvif->u.ap.noa_len = new_len;
  2077. spin_unlock_bh(&ar->data_lock);
  2078. kfree(old_data);
  2079. }
  2080. if (arvif->u.ap.noa_data)
  2081. if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
  2082. memcpy(skb_put(bcn, arvif->u.ap.noa_len),
  2083. arvif->u.ap.noa_data,
  2084. arvif->u.ap.noa_len);
  2085. return;
  2086. cleanup:
  2087. spin_lock_bh(&ar->data_lock);
  2088. arvif->u.ap.noa_data = NULL;
  2089. arvif->u.ap.noa_len = 0;
  2090. spin_unlock_bh(&ar->data_lock);
  2091. kfree(old_data);
  2092. }
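/* Parse a host SWBA event: keep the vdev bitmap and collect per-vdev
 * pointers to the TIM and P2P NoA info, bounded by the size of the
 * tim_info array in the argument structure.
 */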
  2093. static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
  2094. struct wmi_swba_ev_arg *arg)
  2095. {
  2096. struct wmi_host_swba_event *ev = (void *)skb->data;
  2097. u32 map;
  2098. size_t i;
  2099. if (skb->len < sizeof(*ev))
  2100. return -EPROTO;
  2101. skb_pull(skb, sizeof(*ev));
  2102. arg->vdev_map = ev->vdev_map;
  2103. for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
  2104. if (!(map & BIT(0)))
  2105. continue;
2106. /* If this happens the firmware has changed and ath10k needs to
2107. * update the maximum size of the tim_info array.
2108. */
  2109. if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
  2110. break;
  2111. arg->tim_info[i] = &ev->bcn_info[i].tim_info;
  2112. arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
  2113. i++;
  2114. }
  2115. return 0;
  2116. }
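/* For every vdev set in the SWBA bitmap: fetch a fresh beacon template from
 * mac80211, patch in the TIM and NoA contents reported by the firmware, map
 * (or copy) the buffer for DMA and schedule it for transmission. CSA
 * completion is also driven from here since beacons have no tx completions.
 */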
  2117. void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
  2118. {
  2119. struct wmi_swba_ev_arg arg = {};
  2120. u32 map;
  2121. int i = -1;
  2122. const struct wmi_tim_info *tim_info;
  2123. const struct wmi_p2p_noa_info *noa_info;
  2124. struct ath10k_vif *arvif;
  2125. struct sk_buff *bcn;
  2126. dma_addr_t paddr;
  2127. int ret, vdev_id = 0;
  2128. ret = ath10k_wmi_pull_swba(ar, skb, &arg);
  2129. if (ret) {
  2130. ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
  2131. return;
  2132. }
  2133. map = __le32_to_cpu(arg.vdev_map);
  2134. ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
  2135. map);
  2136. for (; map; map >>= 1, vdev_id++) {
  2137. if (!(map & 0x1))
  2138. continue;
  2139. i++;
  2140. if (i >= WMI_MAX_AP_VDEV) {
  2141. ath10k_warn(ar, "swba has corrupted vdev map\n");
  2142. break;
  2143. }
  2144. tim_info = arg.tim_info[i];
  2145. noa_info = arg.noa_info[i];
  2146. ath10k_dbg(ar, ATH10K_DBG_MGMT,
  2147. "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
  2148. i,
  2149. __le32_to_cpu(tim_info->tim_len),
  2150. __le32_to_cpu(tim_info->tim_mcast),
  2151. __le32_to_cpu(tim_info->tim_changed),
  2152. __le32_to_cpu(tim_info->tim_num_ps_pending),
  2153. __le32_to_cpu(tim_info->tim_bitmap[3]),
  2154. __le32_to_cpu(tim_info->tim_bitmap[2]),
  2155. __le32_to_cpu(tim_info->tim_bitmap[1]),
  2156. __le32_to_cpu(tim_info->tim_bitmap[0]));
  2157. arvif = ath10k_get_arvif(ar, vdev_id);
  2158. if (arvif == NULL) {
  2159. ath10k_warn(ar, "no vif for vdev_id %d found\n",
  2160. vdev_id);
  2161. continue;
  2162. }
2163. /* There are no completions for beacons, so wait for the next SWBA
2164. * before telling mac80211 to decrement the CSA counter.
2165. *
2166. * Once the CSA countdown completes, stop sending beacons until the
2167. * actual channel switch is done */
  2168. if (arvif->vif->csa_active &&
  2169. ieee80211_csa_is_complete(arvif->vif)) {
  2170. ieee80211_csa_finish(arvif->vif);
  2171. continue;
  2172. }
  2173. bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
  2174. if (!bcn) {
  2175. ath10k_warn(ar, "could not get mac80211 beacon\n");
  2176. continue;
  2177. }
  2178. ath10k_tx_h_seq_no(arvif->vif, bcn);
  2179. ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
  2180. ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
  2181. spin_lock_bh(&ar->data_lock);
  2182. if (arvif->beacon) {
  2183. switch (arvif->beacon_state) {
  2184. case ATH10K_BEACON_SENT:
  2185. break;
  2186. case ATH10K_BEACON_SCHEDULED:
  2187. ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
  2188. arvif->vdev_id);
  2189. break;
  2190. case ATH10K_BEACON_SENDING:
  2191. ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
  2192. arvif->vdev_id);
  2193. dev_kfree_skb(bcn);
  2194. goto skip;
  2195. }
  2196. ath10k_mac_vif_beacon_free(arvif);
  2197. }
  2198. if (!arvif->beacon_buf) {
  2199. paddr = dma_map_single(arvif->ar->dev, bcn->data,
  2200. bcn->len, DMA_TO_DEVICE);
  2201. ret = dma_mapping_error(arvif->ar->dev, paddr);
  2202. if (ret) {
  2203. ath10k_warn(ar, "failed to map beacon: %d\n",
  2204. ret);
  2205. dev_kfree_skb_any(bcn);
  2206. goto skip;
  2207. }
  2208. ATH10K_SKB_CB(bcn)->paddr = paddr;
  2209. } else {
  2210. if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
  2211. ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
  2212. bcn->len, IEEE80211_MAX_FRAME_LEN);
  2213. skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
  2214. }
  2215. memcpy(arvif->beacon_buf, bcn->data, bcn->len);
  2216. ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
  2217. }
  2218. arvif->beacon = bcn;
  2219. arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
  2220. trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
  2221. trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
  2222. skip:
  2223. spin_unlock_bh(&ar->data_lock);
  2224. }
  2225. ath10k_wmi_tx_beacons_nowait(ar);
  2226. }
  2227. void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
  2228. {
  2229. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
  2230. }
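/* Decode a radar pulse summary into a pulse_event (64-bit TSF, current
 * channel frequency, pulse width and clamped RSSI) and feed it to the DFS
 * pattern detector. A matched pattern is reported to mac80211 unless radar
 * reporting has been blocked through debugfs.
 */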
  2231. static void ath10k_dfs_radar_report(struct ath10k *ar,
  2232. const struct wmi_phyerr *phyerr,
  2233. const struct phyerr_radar_report *rr,
  2234. u64 tsf)
  2235. {
  2236. u32 reg0, reg1, tsf32l;
  2237. struct pulse_event pe;
  2238. u64 tsf64;
  2239. u8 rssi, width;
  2240. reg0 = __le32_to_cpu(rr->reg0);
  2241. reg1 = __le32_to_cpu(rr->reg1);
  2242. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  2243. "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
  2244. MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
  2245. MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
  2246. MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
  2247. MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
  2248. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  2249. "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
  2250. MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
  2251. MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
  2252. MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
  2253. MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
  2254. MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
  2255. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  2256. "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
  2257. MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
  2258. MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
  2259. if (!ar->dfs_detector)
  2260. return;
  2261. /* report event to DFS pattern detector */
  2262. tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
  2263. tsf64 = tsf & (~0xFFFFFFFFULL);
  2264. tsf64 |= tsf32l;
  2265. width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
  2266. rssi = phyerr->rssi_combined;
2267. /* the hardware stores this as an 8-bit signed value;
2268. * clamp it to zero if it is negative
2269. */
  2270. if (rssi & 0x80)
  2271. rssi = 0;
  2272. pe.ts = tsf64;
  2273. pe.freq = ar->hw->conf.chandef.chan->center_freq;
  2274. pe.width = width;
  2275. pe.rssi = rssi;
  2276. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  2277. "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
  2278. pe.freq, pe.width, pe.rssi, pe.ts);
  2279. ATH10K_DFS_STAT_INC(ar, pulses_detected);
  2280. if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
  2281. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  2282. "dfs no pulse pattern detected, yet\n");
  2283. return;
  2284. }
  2285. ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
  2286. ATH10K_DFS_STAT_INC(ar, radar_detected);
2287. /* Radar event reporting can be suppressed via the debugfs file
2288. * dfs_block_radar_events */
  2289. if (ar->dfs_block_radar_events) {
  2290. ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
  2291. return;
  2292. }
  2293. ieee80211_radar_detected(ar->hw);
  2294. }
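/* Sanity check the search FFT report accompanying a radar pulse: an RSSI at
 * the possibly-false level combined with a low peak magnitude causes the
 * pulse to be discarded as a likely false detection.
 */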
  2295. static int ath10k_dfs_fft_report(struct ath10k *ar,
  2296. const struct wmi_phyerr *phyerr,
  2297. const struct phyerr_fft_report *fftr,
  2298. u64 tsf)
  2299. {
  2300. u32 reg0, reg1;
  2301. u8 rssi, peak_mag;
  2302. reg0 = __le32_to_cpu(fftr->reg0);
  2303. reg1 = __le32_to_cpu(fftr->reg1);
  2304. rssi = phyerr->rssi_combined;
  2305. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  2306. "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
  2307. MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
  2308. MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
  2309. MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
  2310. MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
  2311. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  2312. "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
  2313. MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
  2314. MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
  2315. MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
  2316. MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
  2317. peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
2318. /* discard likely false radar pulse detections */
  2319. if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
  2320. peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
  2321. ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
  2322. ATH10K_DFS_STAT_INC(ar, pulses_discarded);
  2323. return -EINVAL;
  2324. }
  2325. return 0;
  2326. }
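/* Walk the TLV-encoded phyerr buffer, validating each TLV header against
 * the buffer length, and dispatch radar pulse summaries and search FFT
 * reports; a pulse that fails the FFT check aborts further processing.
 */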
  2327. void ath10k_wmi_event_dfs(struct ath10k *ar,
  2328. const struct wmi_phyerr *phyerr,
  2329. u64 tsf)
  2330. {
  2331. int buf_len, tlv_len, res, i = 0;
  2332. const struct phyerr_tlv *tlv;
  2333. const struct phyerr_radar_report *rr;
  2334. const struct phyerr_fft_report *fftr;
  2335. const u8 *tlv_buf;
  2336. buf_len = __le32_to_cpu(phyerr->buf_len);
  2337. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  2338. "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
  2339. phyerr->phy_err_code, phyerr->rssi_combined,
  2340. __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len);
  2341. /* Skip event if DFS disabled */
  2342. if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
  2343. return;
  2344. ATH10K_DFS_STAT_INC(ar, pulses_total);
  2345. while (i < buf_len) {
  2346. if (i + sizeof(*tlv) > buf_len) {
  2347. ath10k_warn(ar, "too short buf for tlv header (%d)\n",
  2348. i);
  2349. return;
  2350. }
  2351. tlv = (struct phyerr_tlv *)&phyerr->buf[i];
  2352. tlv_len = __le16_to_cpu(tlv->len);
  2353. tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
  2354. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  2355. "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
  2356. tlv_len, tlv->tag, tlv->sig);
  2357. switch (tlv->tag) {
  2358. case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
  2359. if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
  2360. ath10k_warn(ar, "too short radar pulse summary (%d)\n",
  2361. i);
  2362. return;
  2363. }
  2364. rr = (struct phyerr_radar_report *)tlv_buf;
  2365. ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
  2366. break;
  2367. case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
  2368. if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
  2369. ath10k_warn(ar, "too short fft report (%d)\n",
  2370. i);
  2371. return;
  2372. }
  2373. fftr = (struct phyerr_fft_report *)tlv_buf;
  2374. res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
  2375. if (res)
  2376. return;
  2377. break;
  2378. }
  2379. i += sizeof(*tlv) + tlv_len;
  2380. }
  2381. }
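/* Same TLV walk for spectral scan phyerrs: every search FFT report found is
 * forwarded to the spectral scan subsystem together with the 64-bit TSF.
 */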
  2382. void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
  2383. const struct wmi_phyerr *phyerr,
  2384. u64 tsf)
  2385. {
  2386. int buf_len, tlv_len, res, i = 0;
  2387. struct phyerr_tlv *tlv;
  2388. const void *tlv_buf;
  2389. const struct phyerr_fft_report *fftr;
  2390. size_t fftr_len;
  2391. buf_len = __le32_to_cpu(phyerr->buf_len);
  2392. while (i < buf_len) {
  2393. if (i + sizeof(*tlv) > buf_len) {
  2394. ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
  2395. i);
  2396. return;
  2397. }
  2398. tlv = (struct phyerr_tlv *)&phyerr->buf[i];
  2399. tlv_len = __le16_to_cpu(tlv->len);
  2400. tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
  2401. if (i + sizeof(*tlv) + tlv_len > buf_len) {
  2402. ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
  2403. i);
  2404. return;
  2405. }
  2406. switch (tlv->tag) {
  2407. case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
  2408. if (sizeof(*fftr) > tlv_len) {
  2409. ath10k_warn(ar, "failed to parse fft report at byte %d\n",
  2410. i);
  2411. return;
  2412. }
  2413. fftr_len = tlv_len - sizeof(*fftr);
  2414. fftr = tlv_buf;
  2415. res = ath10k_spectral_process_fft(ar, phyerr,
  2416. fftr, fftr_len,
  2417. tsf);
  2418. if (res < 0) {
  2419. ath10k_warn(ar, "failed to process fft report: %d\n",
  2420. res);
  2421. return;
  2422. }
  2423. break;
  2424. }
  2425. i += sizeof(*tlv) + tlv_len;
  2426. }
  2427. }
  2428. static int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, struct sk_buff *skb,
  2429. struct wmi_phyerr_ev_arg *arg)
  2430. {
  2431. struct wmi_phyerr_event *ev = (void *)skb->data;
  2432. if (skb->len < sizeof(*ev))
  2433. return -EPROTO;
  2434. arg->num_phyerrs = ev->num_phyerrs;
  2435. arg->tsf_l32 = ev->tsf_l32;
  2436. arg->tsf_u32 = ev->tsf_u32;
  2437. arg->buf_len = __cpu_to_le32(skb->len - sizeof(*ev));
  2438. arg->phyerrs = ev->phyerrs;
  2439. return 0;
  2440. }
  2441. void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
  2442. {
  2443. struct wmi_phyerr_ev_arg arg = {};
  2444. const struct wmi_phyerr *phyerr;
  2445. u32 count, i, buf_len, phy_err_code;
  2446. u64 tsf;
  2447. int left_len, ret;
  2448. ATH10K_DFS_STAT_INC(ar, phy_errors);
  2449. ret = ath10k_wmi_pull_phyerr(ar, skb, &arg);
  2450. if (ret) {
  2451. ath10k_warn(ar, "failed to parse phyerr event: %d\n", ret);
  2452. return;
  2453. }
  2454. left_len = __le32_to_cpu(arg.buf_len);
  2455. /* Check number of included events */
  2456. count = __le32_to_cpu(arg.num_phyerrs);
  2457. tsf = __le32_to_cpu(arg.tsf_u32);
  2458. tsf <<= 32;
  2459. tsf |= __le32_to_cpu(arg.tsf_l32);
  2460. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2461. "wmi event phyerr count %d tsf64 0x%llX\n",
  2462. count, tsf);
  2463. phyerr = arg.phyerrs;
  2464. for (i = 0; i < count; i++) {
  2465. /* Check if we can read event header */
  2466. if (left_len < sizeof(*phyerr)) {
  2467. ath10k_warn(ar, "single event (%d) wrong head len\n",
  2468. i);
  2469. return;
  2470. }
  2471. left_len -= sizeof(*phyerr);
  2472. buf_len = __le32_to_cpu(phyerr->buf_len);
  2473. phy_err_code = phyerr->phy_err_code;
  2474. if (left_len < buf_len) {
  2475. ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
  2476. return;
  2477. }
  2478. left_len -= buf_len;
  2479. switch (phy_err_code) {
  2480. case PHY_ERROR_RADAR:
  2481. ath10k_wmi_event_dfs(ar, phyerr, tsf);
  2482. break;
  2483. case PHY_ERROR_SPECTRAL_SCAN:
  2484. ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
  2485. break;
  2486. case PHY_ERROR_FALSE_RADAR_EXT:
  2487. ath10k_wmi_event_dfs(ar, phyerr, tsf);
  2488. ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
  2489. break;
  2490. default:
  2491. break;
  2492. }
  2493. phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len;
  2494. }
  2495. }
  2496. void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
  2497. {
  2498. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
  2499. }
  2500. void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
  2501. {
  2502. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
  2503. }
  2504. void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
  2505. {
  2506. char buf[101], c;
  2507. int i;
  2508. for (i = 0; i < sizeof(buf) - 1; i++) {
  2509. if (i >= skb->len)
  2510. break;
  2511. c = skb->data[i];
  2512. if (c == '\0')
  2513. break;
  2514. if (isascii(c) && isprint(c))
  2515. buf[i] = c;
  2516. else
  2517. buf[i] = '.';
  2518. }
  2519. if (i == sizeof(buf) - 1)
  2520. ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
  2521. /* for some reason the debug prints end with \n, remove that */
  2522. if (skb->data[i - 1] == '\n')
  2523. i--;
  2524. /* the last byte is always reserved for the null character */
  2525. buf[i] = '\0';
  2526. ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
  2527. }
  2528. void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
  2529. {
  2530. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
  2531. }
  2532. void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
  2533. {
  2534. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
  2535. }
  2536. void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
  2537. struct sk_buff *skb)
  2538. {
  2539. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
  2540. }
  2541. void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
  2542. struct sk_buff *skb)
  2543. {
  2544. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
  2545. }
  2546. void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
  2547. {
  2548. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
  2549. }
  2550. void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
  2551. {
  2552. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
  2553. }
  2554. void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
  2555. {
  2556. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
  2557. }
  2558. void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
  2559. {
  2560. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
  2561. }
  2562. void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
  2563. {
  2564. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
  2565. }
  2566. void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
  2567. {
  2568. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
  2569. }
  2570. void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
  2571. {
  2572. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
  2573. }
  2574. void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
  2575. {
  2576. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
  2577. }
  2578. void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
  2579. {
  2580. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
  2581. }
  2582. void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
  2583. struct sk_buff *skb)
  2584. {
  2585. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
  2586. }
  2587. void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
  2588. {
  2589. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
  2590. }
  2591. void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
  2592. {
  2593. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
  2594. }
  2595. void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
  2596. {
  2597. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
  2598. }
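/* Allocate one DMA-coherent host memory chunk requested by the firmware in
 * the service ready event; the chunk's address, length and request id are
 * recorded so they can be advertised back to the target in the init command.
 */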
  2599. static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
  2600. u32 num_units, u32 unit_len)
  2601. {
  2602. dma_addr_t paddr;
  2603. u32 pool_size;
  2604. int idx = ar->wmi.num_mem_chunks;
  2605. pool_size = num_units * round_up(unit_len, 4);
  2606. if (!pool_size)
  2607. return -EINVAL;
  2608. ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
  2609. pool_size,
  2610. &paddr,
  2611. GFP_ATOMIC);
  2612. if (!ar->wmi.mem_chunks[idx].vaddr) {
  2613. ath10k_warn(ar, "failed to allocate memory chunk\n");
  2614. return -ENOMEM;
  2615. }
  2616. memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
  2617. ar->wmi.mem_chunks[idx].paddr = paddr;
  2618. ar->wmi.mem_chunks[idx].len = pool_size;
  2619. ar->wmi.mem_chunks[idx].req_id = req_id;
  2620. ar->wmi.num_mem_chunks++;
  2621. return 0;
  2622. }
  2623. static int
  2624. ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
  2625. struct wmi_svc_rdy_ev_arg *arg)
  2626. {
  2627. struct wmi_service_ready_event *ev;
  2628. size_t i, n;
  2629. if (skb->len < sizeof(*ev))
  2630. return -EPROTO;
  2631. ev = (void *)skb->data;
  2632. skb_pull(skb, sizeof(*ev));
  2633. arg->min_tx_power = ev->hw_min_tx_power;
  2634. arg->max_tx_power = ev->hw_max_tx_power;
  2635. arg->ht_cap = ev->ht_cap_info;
  2636. arg->vht_cap = ev->vht_cap_info;
  2637. arg->sw_ver0 = ev->sw_version;
  2638. arg->sw_ver1 = ev->sw_version_1;
  2639. arg->phy_capab = ev->phy_capability;
  2640. arg->num_rf_chains = ev->num_rf_chains;
  2641. arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
  2642. arg->num_mem_reqs = ev->num_mem_reqs;
  2643. arg->service_map = ev->wmi_service_bitmap;
  2644. arg->service_map_len = sizeof(ev->wmi_service_bitmap);
  2645. n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
  2646. ARRAY_SIZE(arg->mem_reqs));
  2647. for (i = 0; i < n; i++)
  2648. arg->mem_reqs[i] = &ev->mem_reqs[i];
  2649. if (skb->len <
  2650. __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
  2651. return -EPROTO;
  2652. return 0;
  2653. }
  2654. static int
  2655. ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
  2656. struct wmi_svc_rdy_ev_arg *arg)
  2657. {
  2658. struct wmi_10x_service_ready_event *ev;
  2659. int i, n;
  2660. if (skb->len < sizeof(*ev))
  2661. return -EPROTO;
  2662. ev = (void *)skb->data;
  2663. skb_pull(skb, sizeof(*ev));
  2664. arg->min_tx_power = ev->hw_min_tx_power;
  2665. arg->max_tx_power = ev->hw_max_tx_power;
  2666. arg->ht_cap = ev->ht_cap_info;
  2667. arg->vht_cap = ev->vht_cap_info;
  2668. arg->sw_ver0 = ev->sw_version;
  2669. arg->phy_capab = ev->phy_capability;
  2670. arg->num_rf_chains = ev->num_rf_chains;
  2671. arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
  2672. arg->num_mem_reqs = ev->num_mem_reqs;
  2673. arg->service_map = ev->wmi_service_bitmap;
  2674. arg->service_map_len = sizeof(ev->wmi_service_bitmap);
  2675. n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
  2676. ARRAY_SIZE(arg->mem_reqs));
  2677. for (i = 0; i < n; i++)
  2678. arg->mem_reqs[i] = &ev->mem_reqs[i];
  2679. if (skb->len <
  2680. __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
  2681. return -EPROTO;
  2682. return 0;
  2683. }
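/* Service ready handler: import the service bitmap, capability fields,
 * firmware version and regulatory domain, clamp the advertised RF chains,
 * satisfy each host memory request and finally complete service_ready,
 * which the boot sequence waits on.
 */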
  2684. void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
  2685. {
  2686. struct wmi_svc_rdy_ev_arg arg = {};
  2687. u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
  2688. int ret;
  2689. ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
  2690. if (ret) {
  2691. ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
  2692. return;
  2693. }
  2694. memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
  2695. ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
  2696. arg.service_map_len);
  2697. ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
  2698. ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
  2699. ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
  2700. ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
  2701. ar->fw_version_major =
  2702. (__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
  2703. ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
  2704. ar->fw_version_release =
  2705. (__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
  2706. ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
  2707. ar->phy_capability = __le32_to_cpu(arg.phy_capab);
  2708. ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
  2709. ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd);
  2710. ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
  2711. arg.service_map, arg.service_map_len);
  2712. /* only manually set fw features when not using FW IE format */
  2713. if (ar->fw_api == 1 && ar->fw_version_build > 636)
  2714. set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
  2715. if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
  2716. ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
  2717. ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
  2718. ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
  2719. }
  2720. ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
  2721. ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1;
  2722. if (strlen(ar->hw->wiphy->fw_version) == 0) {
  2723. snprintf(ar->hw->wiphy->fw_version,
  2724. sizeof(ar->hw->wiphy->fw_version),
  2725. "%u.%u.%u.%u",
  2726. ar->fw_version_major,
  2727. ar->fw_version_minor,
  2728. ar->fw_version_release,
  2729. ar->fw_version_build);
  2730. }
  2731. num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
  2732. if (num_mem_reqs > WMI_MAX_MEM_REQS) {
  2733. ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
  2734. num_mem_reqs);
  2735. return;
  2736. }
  2737. for (i = 0; i < num_mem_reqs; ++i) {
  2738. req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
  2739. num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
  2740. unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
  2741. num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
  2742. if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
2743. /* the number of units to allocate is the number of
2744. * peers, plus 1 extra for the self peer on the target */
2745. /* this value must be kept in sync between host and
2746. * target or the two can drift apart */
  2747. num_units = TARGET_10X_NUM_PEERS + 1;
  2748. else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
  2749. num_units = TARGET_10X_NUM_VDEVS + 1;
  2750. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2751. "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
  2752. req_id,
  2753. __le32_to_cpu(arg.mem_reqs[i]->num_units),
  2754. num_unit_info,
  2755. unit_size,
  2756. num_units);
  2757. ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
  2758. unit_size);
  2759. if (ret)
  2760. return;
  2761. }
  2762. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2763. "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
  2764. __le32_to_cpu(arg.min_tx_power),
  2765. __le32_to_cpu(arg.max_tx_power),
  2766. __le32_to_cpu(arg.ht_cap),
  2767. __le32_to_cpu(arg.vht_cap),
  2768. __le32_to_cpu(arg.sw_ver0),
  2769. __le32_to_cpu(arg.sw_ver1),
  2770. __le32_to_cpu(arg.fw_build),
  2771. __le32_to_cpu(arg.phy_capab),
  2772. __le32_to_cpu(arg.num_rf_chains),
  2773. __le32_to_cpu(arg.eeprom_rd),
  2774. __le32_to_cpu(arg.num_mem_reqs));
  2775. complete(&ar->wmi.service_ready);
  2776. }
  2777. static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
  2778. struct wmi_rdy_ev_arg *arg)
  2779. {
  2780. struct wmi_ready_event *ev = (void *)skb->data;
  2781. if (skb->len < sizeof(*ev))
  2782. return -EPROTO;
  2783. skb_pull(skb, sizeof(*ev));
  2784. arg->sw_version = ev->sw_version;
  2785. arg->abi_version = ev->abi_version;
  2786. arg->status = ev->status;
  2787. arg->mac_addr = ev->mac_addr.addr;
  2788. return 0;
  2789. }
  2790. int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
  2791. {
  2792. struct wmi_rdy_ev_arg arg = {};
  2793. int ret;
  2794. ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
  2795. if (ret) {
  2796. ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
  2797. return ret;
  2798. }
  2799. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2800. "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
  2801. __le32_to_cpu(arg.sw_version),
  2802. __le32_to_cpu(arg.abi_version),
  2803. arg.mac_addr,
  2804. __le32_to_cpu(arg.status));
  2805. ether_addr_copy(ar->mac_addr, arg.mac_addr);
  2806. complete(&ar->wmi.unified_ready);
  2807. return 0;
  2808. }
  2809. static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
  2810. {
  2811. const struct wmi_pdev_temperature_event *ev;
  2812. ev = (struct wmi_pdev_temperature_event *)skb->data;
  2813. if (WARN_ON(skb->len < sizeof(*ev)))
  2814. return -EPROTO;
  2815. ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
  2816. return 0;
  2817. }
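/* Main WMI (non-10.x) event dispatcher: strip the command header, trace the
 * event and route it by event id. The management rx handler takes ownership
 * of the skb; every other event is freed here after handling.
 */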
  2818. static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
  2819. {
  2820. struct wmi_cmd_hdr *cmd_hdr;
  2821. enum wmi_event_id id;
  2822. cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
  2823. id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
  2824. if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
  2825. return;
  2826. trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
  2827. switch (id) {
  2828. case WMI_MGMT_RX_EVENTID:
  2829. ath10k_wmi_event_mgmt_rx(ar, skb);
  2830. /* mgmt_rx() owns the skb now! */
  2831. return;
  2832. case WMI_SCAN_EVENTID:
  2833. ath10k_wmi_event_scan(ar, skb);
  2834. break;
  2835. case WMI_CHAN_INFO_EVENTID:
  2836. ath10k_wmi_event_chan_info(ar, skb);
  2837. break;
  2838. case WMI_ECHO_EVENTID:
  2839. ath10k_wmi_event_echo(ar, skb);
  2840. break;
  2841. case WMI_DEBUG_MESG_EVENTID:
  2842. ath10k_wmi_event_debug_mesg(ar, skb);
  2843. break;
  2844. case WMI_UPDATE_STATS_EVENTID:
  2845. ath10k_wmi_event_update_stats(ar, skb);
  2846. break;
  2847. case WMI_VDEV_START_RESP_EVENTID:
  2848. ath10k_wmi_event_vdev_start_resp(ar, skb);
  2849. break;
  2850. case WMI_VDEV_STOPPED_EVENTID:
  2851. ath10k_wmi_event_vdev_stopped(ar, skb);
  2852. break;
  2853. case WMI_PEER_STA_KICKOUT_EVENTID:
  2854. ath10k_wmi_event_peer_sta_kickout(ar, skb);
  2855. break;
  2856. case WMI_HOST_SWBA_EVENTID:
  2857. ath10k_wmi_event_host_swba(ar, skb);
  2858. break;
  2859. case WMI_TBTTOFFSET_UPDATE_EVENTID:
  2860. ath10k_wmi_event_tbttoffset_update(ar, skb);
  2861. break;
  2862. case WMI_PHYERR_EVENTID:
  2863. ath10k_wmi_event_phyerr(ar, skb);
  2864. break;
  2865. case WMI_ROAM_EVENTID:
  2866. ath10k_wmi_event_roam(ar, skb);
  2867. break;
  2868. case WMI_PROFILE_MATCH:
  2869. ath10k_wmi_event_profile_match(ar, skb);
  2870. break;
  2871. case WMI_DEBUG_PRINT_EVENTID:
  2872. ath10k_wmi_event_debug_print(ar, skb);
  2873. break;
  2874. case WMI_PDEV_QVIT_EVENTID:
  2875. ath10k_wmi_event_pdev_qvit(ar, skb);
  2876. break;
  2877. case WMI_WLAN_PROFILE_DATA_EVENTID:
  2878. ath10k_wmi_event_wlan_profile_data(ar, skb);
  2879. break;
  2880. case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
  2881. ath10k_wmi_event_rtt_measurement_report(ar, skb);
  2882. break;
  2883. case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
  2884. ath10k_wmi_event_tsf_measurement_report(ar, skb);
  2885. break;
  2886. case WMI_RTT_ERROR_REPORT_EVENTID:
  2887. ath10k_wmi_event_rtt_error_report(ar, skb);
  2888. break;
  2889. case WMI_WOW_WAKEUP_HOST_EVENTID:
  2890. ath10k_wmi_event_wow_wakeup_host(ar, skb);
  2891. break;
  2892. case WMI_DCS_INTERFERENCE_EVENTID:
  2893. ath10k_wmi_event_dcs_interference(ar, skb);
  2894. break;
  2895. case WMI_PDEV_TPC_CONFIG_EVENTID:
  2896. ath10k_wmi_event_pdev_tpc_config(ar, skb);
  2897. break;
  2898. case WMI_PDEV_FTM_INTG_EVENTID:
  2899. ath10k_wmi_event_pdev_ftm_intg(ar, skb);
  2900. break;
  2901. case WMI_GTK_OFFLOAD_STATUS_EVENTID:
  2902. ath10k_wmi_event_gtk_offload_status(ar, skb);
  2903. break;
  2904. case WMI_GTK_REKEY_FAIL_EVENTID:
  2905. ath10k_wmi_event_gtk_rekey_fail(ar, skb);
  2906. break;
  2907. case WMI_TX_DELBA_COMPLETE_EVENTID:
  2908. ath10k_wmi_event_delba_complete(ar, skb);
  2909. break;
  2910. case WMI_TX_ADDBA_COMPLETE_EVENTID:
  2911. ath10k_wmi_event_addba_complete(ar, skb);
  2912. break;
  2913. case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
  2914. ath10k_wmi_event_vdev_install_key_complete(ar, skb);
  2915. break;
  2916. case WMI_SERVICE_READY_EVENTID:
  2917. ath10k_wmi_event_service_ready(ar, skb);
  2918. break;
  2919. case WMI_READY_EVENTID:
  2920. ath10k_wmi_event_ready(ar, skb);
  2921. break;
  2922. default:
  2923. ath10k_warn(ar, "Unknown eventid: %d\n", id);
  2924. break;
  2925. }
  2926. dev_kfree_skb(skb);
  2927. }
  2928. static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
  2929. {
  2930. struct wmi_cmd_hdr *cmd_hdr;
  2931. enum wmi_10x_event_id id;
  2932. bool consumed;
  2933. cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
  2934. id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
  2935. if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
  2936. return;
  2937. trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
  2938. consumed = ath10k_tm_event_wmi(ar, id, skb);
2939. /* The ready event must be handled normally even in UTF mode so that we
2940. * know the UTF firmware has booted; all other WMI events are simply
2941. * passed through to testmode.
2942. */
  2943. if (consumed && id != WMI_10X_READY_EVENTID) {
  2944. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2945. "wmi testmode consumed 0x%x\n", id);
  2946. goto out;
  2947. }
  2948. switch (id) {
  2949. case WMI_10X_MGMT_RX_EVENTID:
  2950. ath10k_wmi_event_mgmt_rx(ar, skb);
  2951. /* mgmt_rx() owns the skb now! */
  2952. return;
  2953. case WMI_10X_SCAN_EVENTID:
  2954. ath10k_wmi_event_scan(ar, skb);
  2955. break;
  2956. case WMI_10X_CHAN_INFO_EVENTID:
  2957. ath10k_wmi_event_chan_info(ar, skb);
  2958. break;
  2959. case WMI_10X_ECHO_EVENTID:
  2960. ath10k_wmi_event_echo(ar, skb);
  2961. break;
  2962. case WMI_10X_DEBUG_MESG_EVENTID:
  2963. ath10k_wmi_event_debug_mesg(ar, skb);
  2964. break;
  2965. case WMI_10X_UPDATE_STATS_EVENTID:
  2966. ath10k_wmi_event_update_stats(ar, skb);
  2967. break;
  2968. case WMI_10X_VDEV_START_RESP_EVENTID:
  2969. ath10k_wmi_event_vdev_start_resp(ar, skb);
  2970. break;
  2971. case WMI_10X_VDEV_STOPPED_EVENTID:
  2972. ath10k_wmi_event_vdev_stopped(ar, skb);
  2973. break;
  2974. case WMI_10X_PEER_STA_KICKOUT_EVENTID:
  2975. ath10k_wmi_event_peer_sta_kickout(ar, skb);
  2976. break;
  2977. case WMI_10X_HOST_SWBA_EVENTID:
  2978. ath10k_wmi_event_host_swba(ar, skb);
  2979. break;
  2980. case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
  2981. ath10k_wmi_event_tbttoffset_update(ar, skb);
  2982. break;
  2983. case WMI_10X_PHYERR_EVENTID:
  2984. ath10k_wmi_event_phyerr(ar, skb);
  2985. break;
  2986. case WMI_10X_ROAM_EVENTID:
  2987. ath10k_wmi_event_roam(ar, skb);
  2988. break;
  2989. case WMI_10X_PROFILE_MATCH:
  2990. ath10k_wmi_event_profile_match(ar, skb);
  2991. break;
  2992. case WMI_10X_DEBUG_PRINT_EVENTID:
  2993. ath10k_wmi_event_debug_print(ar, skb);
  2994. break;
  2995. case WMI_10X_PDEV_QVIT_EVENTID:
  2996. ath10k_wmi_event_pdev_qvit(ar, skb);
  2997. break;
  2998. case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
  2999. ath10k_wmi_event_wlan_profile_data(ar, skb);
  3000. break;
  3001. case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
  3002. ath10k_wmi_event_rtt_measurement_report(ar, skb);
  3003. break;
  3004. case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
  3005. ath10k_wmi_event_tsf_measurement_report(ar, skb);
  3006. break;
  3007. case WMI_10X_RTT_ERROR_REPORT_EVENTID:
  3008. ath10k_wmi_event_rtt_error_report(ar, skb);
  3009. break;
  3010. case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
  3011. ath10k_wmi_event_wow_wakeup_host(ar, skb);
  3012. break;
  3013. case WMI_10X_DCS_INTERFERENCE_EVENTID:
  3014. ath10k_wmi_event_dcs_interference(ar, skb);
  3015. break;
  3016. case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
  3017. ath10k_wmi_event_pdev_tpc_config(ar, skb);
  3018. break;
  3019. case WMI_10X_INST_RSSI_STATS_EVENTID:
  3020. ath10k_wmi_event_inst_rssi_stats(ar, skb);
  3021. break;
  3022. case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
  3023. ath10k_wmi_event_vdev_standby_req(ar, skb);
  3024. break;
  3025. case WMI_10X_VDEV_RESUME_REQ_EVENTID:
  3026. ath10k_wmi_event_vdev_resume_req(ar, skb);
  3027. break;
  3028. case WMI_10X_SERVICE_READY_EVENTID:
  3029. ath10k_wmi_event_service_ready(ar, skb);
  3030. break;
  3031. case WMI_10X_READY_EVENTID:
  3032. ath10k_wmi_event_ready(ar, skb);
  3033. break;
  3034. case WMI_10X_PDEV_UTF_EVENTID:
  3035. /* ignore utf events */
  3036. break;
  3037. default:
  3038. ath10k_warn(ar, "Unknown eventid: %d\n", id);
  3039. break;
  3040. }
  3041. out:
  3042. dev_kfree_skb(skb);
  3043. }
  3044. static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
  3045. {
  3046. struct wmi_cmd_hdr *cmd_hdr;
  3047. enum wmi_10_2_event_id id;
  3048. cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
  3049. id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
  3050. if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
  3051. return;
  3052. trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
  3053. switch (id) {
  3054. case WMI_10_2_MGMT_RX_EVENTID:
  3055. ath10k_wmi_event_mgmt_rx(ar, skb);
  3056. /* mgmt_rx() owns the skb now! */
  3057. return;
  3058. case WMI_10_2_SCAN_EVENTID:
  3059. ath10k_wmi_event_scan(ar, skb);
  3060. break;
  3061. case WMI_10_2_CHAN_INFO_EVENTID:
  3062. ath10k_wmi_event_chan_info(ar, skb);
  3063. break;
  3064. case WMI_10_2_ECHO_EVENTID:
  3065. ath10k_wmi_event_echo(ar, skb);
  3066. break;
  3067. case WMI_10_2_DEBUG_MESG_EVENTID:
  3068. ath10k_wmi_event_debug_mesg(ar, skb);
  3069. break;
  3070. case WMI_10_2_UPDATE_STATS_EVENTID:
  3071. ath10k_wmi_event_update_stats(ar, skb);
  3072. break;
  3073. case WMI_10_2_VDEV_START_RESP_EVENTID:
  3074. ath10k_wmi_event_vdev_start_resp(ar, skb);
  3075. break;
  3076. case WMI_10_2_VDEV_STOPPED_EVENTID:
  3077. ath10k_wmi_event_vdev_stopped(ar, skb);
  3078. break;
  3079. case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
  3080. ath10k_wmi_event_peer_sta_kickout(ar, skb);
  3081. break;
  3082. case WMI_10_2_HOST_SWBA_EVENTID:
  3083. ath10k_wmi_event_host_swba(ar, skb);
  3084. break;
  3085. case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
  3086. ath10k_wmi_event_tbttoffset_update(ar, skb);
  3087. break;
  3088. case WMI_10_2_PHYERR_EVENTID:
  3089. ath10k_wmi_event_phyerr(ar, skb);
  3090. break;
  3091. case WMI_10_2_ROAM_EVENTID:
  3092. ath10k_wmi_event_roam(ar, skb);
  3093. break;
  3094. case WMI_10_2_PROFILE_MATCH:
  3095. ath10k_wmi_event_profile_match(ar, skb);
  3096. break;
  3097. case WMI_10_2_DEBUG_PRINT_EVENTID:
  3098. ath10k_wmi_event_debug_print(ar, skb);
  3099. break;
  3100. case WMI_10_2_PDEV_QVIT_EVENTID:
  3101. ath10k_wmi_event_pdev_qvit(ar, skb);
  3102. break;
  3103. case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
  3104. ath10k_wmi_event_wlan_profile_data(ar, skb);
  3105. break;
  3106. case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
  3107. ath10k_wmi_event_rtt_measurement_report(ar, skb);
  3108. break;
  3109. case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
  3110. ath10k_wmi_event_tsf_measurement_report(ar, skb);
  3111. break;
  3112. case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
  3113. ath10k_wmi_event_rtt_error_report(ar, skb);
  3114. break;
  3115. case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
  3116. ath10k_wmi_event_wow_wakeup_host(ar, skb);
  3117. break;
  3118. case WMI_10_2_DCS_INTERFERENCE_EVENTID:
  3119. ath10k_wmi_event_dcs_interference(ar, skb);
  3120. break;
  3121. case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
  3122. ath10k_wmi_event_pdev_tpc_config(ar, skb);
  3123. break;
  3124. case WMI_10_2_INST_RSSI_STATS_EVENTID:
  3125. ath10k_wmi_event_inst_rssi_stats(ar, skb);
  3126. break;
  3127. case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
  3128. ath10k_wmi_event_vdev_standby_req(ar, skb);
  3129. break;
  3130. case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
  3131. ath10k_wmi_event_vdev_resume_req(ar, skb);
  3132. break;
  3133. case WMI_10_2_SERVICE_READY_EVENTID:
  3134. ath10k_wmi_event_service_ready(ar, skb);
  3135. break;
  3136. case WMI_10_2_READY_EVENTID:
  3137. ath10k_wmi_event_ready(ar, skb);
  3138. break;
  3139. case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
  3140. ath10k_wmi_event_temperature(ar, skb);
  3141. break;
  3142. case WMI_10_2_RTT_KEEPALIVE_EVENTID:
  3143. case WMI_10_2_GPIO_INPUT_EVENTID:
  3144. case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
  3145. case WMI_10_2_GENERIC_BUFFER_EVENTID:
  3146. case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
  3147. case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
  3148. case WMI_10_2_WDS_PEER_EVENTID:
  3149. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3150. "received event id %d not implemented\n", id);
  3151. break;
  3152. default:
  3153. ath10k_warn(ar, "Unknown eventid: %d\n", id);
  3154. break;
  3155. }
  3156. dev_kfree_skb(skb);
  3157. }
  3158. static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
  3159. {
  3160. int ret;
  3161. ret = ath10k_wmi_rx(ar, skb);
  3162. if (ret)
  3163. ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
  3164. }
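/* Connect the WMI control service to HTC and remember the endpoint id that
 * all subsequent WMI commands are transmitted on.
 */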
  3165. int ath10k_wmi_connect(struct ath10k *ar)
  3166. {
  3167. int status;
  3168. struct ath10k_htc_svc_conn_req conn_req;
  3169. struct ath10k_htc_svc_conn_resp conn_resp;
  3170. memset(&conn_req, 0, sizeof(conn_req));
  3171. memset(&conn_resp, 0, sizeof(conn_resp));
  3172. /* these fields are the same for all service endpoints */
  3173. conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
  3174. conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
  3175. conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
  3176. /* connect to control service */
  3177. conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
  3178. status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
  3179. if (status) {
  3180. ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
  3181. status);
  3182. return status;
  3183. }
  3184. ar->wmi.eid = conn_resp.eid;
  3185. return 0;
  3186. }
  3187. static struct sk_buff *
  3188. ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
  3189. u16 ctl2g, u16 ctl5g,
  3190. enum wmi_dfs_region dfs_reg)
  3191. {
  3192. struct wmi_pdev_set_regdomain_cmd *cmd;
  3193. struct sk_buff *skb;
  3194. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3195. if (!skb)
  3196. return ERR_PTR(-ENOMEM);
  3197. cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
  3198. cmd->reg_domain = __cpu_to_le32(rd);
  3199. cmd->reg_domain_2G = __cpu_to_le32(rd2g);
  3200. cmd->reg_domain_5G = __cpu_to_le32(rd5g);
  3201. cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
  3202. cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
  3203. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3204. "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
  3205. rd, rd2g, rd5g, ctl2g, ctl5g);
  3206. return skb;
  3207. }
  3208. static struct sk_buff *
  3209. ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
  3210. rd5g, u16 ctl2g, u16 ctl5g,
  3211. enum wmi_dfs_region dfs_reg)
  3212. {
  3213. struct wmi_pdev_set_regdomain_cmd_10x *cmd;
  3214. struct sk_buff *skb;
  3215. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3216. if (!skb)
  3217. return ERR_PTR(-ENOMEM);
  3218. cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
  3219. cmd->reg_domain = __cpu_to_le32(rd);
  3220. cmd->reg_domain_2G = __cpu_to_le32(rd2g);
  3221. cmd->reg_domain_5G = __cpu_to_le32(rd5g);
  3222. cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
  3223. cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
  3224. cmd->dfs_domain = __cpu_to_le32(dfs_reg);
  3225. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3226. "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
  3227. rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
  3228. return skb;
  3229. }
  3230. static struct sk_buff *
  3231. ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
  3232. {
  3233. struct wmi_pdev_suspend_cmd *cmd;
  3234. struct sk_buff *skb;
  3235. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3236. if (!skb)
  3237. return ERR_PTR(-ENOMEM);
  3238. cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
  3239. cmd->suspend_opt = __cpu_to_le32(suspend_opt);
  3240. return skb;
  3241. }
  3242. static struct sk_buff *
  3243. ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
  3244. {
  3245. struct sk_buff *skb;
  3246. skb = ath10k_wmi_alloc_skb(ar, 0);
  3247. if (!skb)
  3248. return ERR_PTR(-ENOMEM);
  3249. return skb;
  3250. }
  3251. static struct sk_buff *
  3252. ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
  3253. {
  3254. struct wmi_pdev_set_param_cmd *cmd;
  3255. struct sk_buff *skb;
  3256. if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
  3257. ath10k_warn(ar, "pdev param %d not supported by firmware\n",
  3258. id);
  3259. return ERR_PTR(-EOPNOTSUPP);
  3260. }
  3261. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3262. if (!skb)
  3263. return ERR_PTR(-ENOMEM);
  3264. cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
  3265. cmd->param_id = __cpu_to_le32(id);
  3266. cmd->param_value = __cpu_to_le32(value);
  3267. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
  3268. id, value);
  3269. return skb;
  3270. }
  3271. void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
  3272. struct wmi_host_mem_chunks *chunks)
  3273. {
  3274. struct host_memory_chunk *chunk;
  3275. int i;
  3276. chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
  3277. for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
  3278. chunk = &chunks->items[i];
  3279. chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
  3280. chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
  3281. chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
  3282. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3283. "wmi chunk %d len %d requested, addr 0x%llx\n",
  3284. i,
  3285. ar->wmi.mem_chunks[i].len,
  3286. (unsigned long long)ar->wmi.mem_chunks[i].paddr);
  3287. }
  3288. }
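/* Build the WMI init command for main firmware: fill wmi_resource_config
 * with the TARGET_* defaults, size the skb for the command plus the host
 * memory chunk list and append the chunks allocated during service ready.
 */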
  3289. static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
  3290. {
  3291. struct wmi_init_cmd *cmd;
  3292. struct sk_buff *buf;
  3293. struct wmi_resource_config config = {};
  3294. u32 len, val;
  3295. config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
  3296. config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
  3297. config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
  3298. config.num_offload_reorder_bufs =
  3299. __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
  3300. config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
  3301. config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
  3302. config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
  3303. config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
  3304. config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
  3305. config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
  3306. config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
  3307. config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
  3308. config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
  3309. config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);
  3310. config.scan_max_pending_reqs =
  3311. __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
  3312. config.bmiss_offload_max_vdev =
  3313. __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
  3314. config.roam_offload_max_vdev =
  3315. __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
  3316. config.roam_offload_max_ap_profiles =
  3317. __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
  3318. config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
  3319. config.num_mcast_table_elems =
  3320. __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
  3321. config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
  3322. config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
  3323. config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
  3324. config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
  3325. config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
  3326. val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
  3327. config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
  3328. config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
  3329. config.gtk_offload_max_vdev =
  3330. __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
  3331. config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
  3332. config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
  3333. len = sizeof(*cmd) +
  3334. (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
  3335. buf = ath10k_wmi_alloc_skb(ar, len);
  3336. if (!buf)
  3337. return ERR_PTR(-ENOMEM);
  3338. cmd = (struct wmi_init_cmd *)buf->data;
  3339. memcpy(&cmd->resource_config, &config, sizeof(config));
  3340. ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
  3341. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
  3342. return buf;
  3343. }
  3344. static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
  3345. {
  3346. struct wmi_init_cmd_10x *cmd;
  3347. struct sk_buff *buf;
  3348. struct wmi_resource_config_10x config = {};
  3349. u32 len, val;
  3350. config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
  3351. config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
  3352. config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
  3353. config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
  3354. config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
  3355. config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
  3356. config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
  3357. config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
  3358. config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
  3359. config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
  3360. config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
  3361. config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
  3362. config.scan_max_pending_reqs =
  3363. __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
  3364. config.bmiss_offload_max_vdev =
  3365. __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
  3366. config.roam_offload_max_vdev =
  3367. __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
  3368. config.roam_offload_max_ap_profiles =
  3369. __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
  3370. config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
  3371. config.num_mcast_table_elems =
  3372. __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
  3373. config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
  3374. config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
  3375. config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
  3376. config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
  3377. config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
  3378. val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
  3379. config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
  3380. config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
  3381. config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
  3382. config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
  3383. len = sizeof(*cmd) +
  3384. (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
  3385. buf = ath10k_wmi_alloc_skb(ar, len);
  3386. if (!buf)
  3387. return ERR_PTR(-ENOMEM);
  3388. cmd = (struct wmi_init_cmd_10x *)buf->data;
  3389. memcpy(&cmd->resource_config, &config, sizeof(config));
  3390. ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
  3391. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
  3392. return buf;
  3393. }
  3394. static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
  3395. {
  3396. struct wmi_init_cmd_10_2 *cmd;
  3397. struct sk_buff *buf;
  3398. struct wmi_resource_config_10x config = {};
  3399. u32 len, val, features;
  3400. config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
  3401. config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
  3402. config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
  3403. config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
  3404. config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
  3405. config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
  3406. config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
  3407. config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
  3408. config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
  3409. config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
  3410. config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
  3411. config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
  3412. config.scan_max_pending_reqs =
  3413. __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
  3414. config.bmiss_offload_max_vdev =
  3415. __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
  3416. config.roam_offload_max_vdev =
  3417. __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
  3418. config.roam_offload_max_ap_profiles =
  3419. __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
  3420. config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
  3421. config.num_mcast_table_elems =
  3422. __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
  3423. config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
  3424. config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
  3425. config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
  3426. config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
  3427. config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
  3428. val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
  3429. config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
  3430. config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
  3431. config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
  3432. config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
  3433. len = sizeof(*cmd) +
  3434. (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
  3435. buf = ath10k_wmi_alloc_skb(ar, len);
  3436. if (!buf)
  3437. return ERR_PTR(-ENOMEM);
  3438. cmd = (struct wmi_init_cmd_10_2 *)buf->data;
  3439. features = WMI_10_2_RX_BATCH_MODE;
  3440. cmd->resource_config.feature_mask = __cpu_to_le32(features);
  3441. memcpy(&cmd->resource_config.common, &config, sizeof(config));
  3442. ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
  3443. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
  3444. return buf;
  3445. }
  3446. int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
  3447. {
  3448. if (arg->ie_len && !arg->ie)
  3449. return -EINVAL;
  3450. if (arg->n_channels && !arg->channels)
  3451. return -EINVAL;
  3452. if (arg->n_ssids && !arg->ssids)
  3453. return -EINVAL;
  3454. if (arg->n_bssids && !arg->bssids)
  3455. return -EINVAL;
  3456. if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
  3457. return -EINVAL;
  3458. if (arg->n_channels > ARRAY_SIZE(arg->channels))
  3459. return -EINVAL;
  3460. if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
  3461. return -EINVAL;
  3462. if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
  3463. return -EINVAL;
  3464. return 0;
  3465. }
  3466. static size_t
  3467. ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
  3468. {
  3469. int len = 0;
  3470. if (arg->ie_len) {
  3471. len += sizeof(struct wmi_ie_data);
  3472. len += roundup(arg->ie_len, 4);
  3473. }
  3474. if (arg->n_channels) {
  3475. len += sizeof(struct wmi_chan_list);
  3476. len += sizeof(__le32) * arg->n_channels;
  3477. }
  3478. if (arg->n_ssids) {
  3479. len += sizeof(struct wmi_ssid_list);
  3480. len += sizeof(struct wmi_ssid) * arg->n_ssids;
  3481. }
  3482. if (arg->n_bssids) {
  3483. len += sizeof(struct wmi_bssid_list);
  3484. len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
  3485. }
  3486. return len;
  3487. }
  3488. void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
  3489. const struct wmi_start_scan_arg *arg)
  3490. {
  3491. u32 scan_id;
  3492. u32 scan_req_id;
  3493. scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
  3494. scan_id |= arg->scan_id;
  3495. scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
  3496. scan_req_id |= arg->scan_req_id;
  3497. cmn->scan_id = __cpu_to_le32(scan_id);
  3498. cmn->scan_req_id = __cpu_to_le32(scan_req_id);
  3499. cmn->vdev_id = __cpu_to_le32(arg->vdev_id);
  3500. cmn->scan_priority = __cpu_to_le32(arg->scan_priority);
  3501. cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
  3502. cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
  3503. cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
  3504. cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time);
  3505. cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time);
  3506. cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
  3507. cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
  3508. cmn->idle_time = __cpu_to_le32(arg->idle_time);
  3509. cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time);
  3510. cmn->probe_delay = __cpu_to_le32(arg->probe_delay);
  3511. cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
  3512. }
  3513. static void
  3514. ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
  3515. const struct wmi_start_scan_arg *arg)
  3516. {
  3517. struct wmi_ie_data *ie;
  3518. struct wmi_chan_list *channels;
  3519. struct wmi_ssid_list *ssids;
  3520. struct wmi_bssid_list *bssids;
  3521. void *ptr = tlvs->tlvs;
  3522. int i;
  3523. if (arg->n_channels) {
  3524. channels = ptr;
  3525. channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
  3526. channels->num_chan = __cpu_to_le32(arg->n_channels);
  3527. for (i = 0; i < arg->n_channels; i++)
  3528. channels->channel_list[i].freq =
  3529. __cpu_to_le16(arg->channels[i]);
  3530. ptr += sizeof(*channels);
  3531. ptr += sizeof(__le32) * arg->n_channels;
  3532. }
  3533. if (arg->n_ssids) {
  3534. ssids = ptr;
  3535. ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
  3536. ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
  3537. for (i = 0; i < arg->n_ssids; i++) {
  3538. ssids->ssids[i].ssid_len =
  3539. __cpu_to_le32(arg->ssids[i].len);
  3540. memcpy(&ssids->ssids[i].ssid,
  3541. arg->ssids[i].ssid,
  3542. arg->ssids[i].len);
  3543. }
  3544. ptr += sizeof(*ssids);
  3545. ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
  3546. }
  3547. if (arg->n_bssids) {
  3548. bssids = ptr;
  3549. bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
  3550. bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
  3551. for (i = 0; i < arg->n_bssids; i++)
  3552. memcpy(&bssids->bssid_list[i],
  3553. arg->bssids[i].bssid,
  3554. ETH_ALEN);
  3555. ptr += sizeof(*bssids);
  3556. ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
  3557. }
  3558. if (arg->ie_len) {
  3559. ie = ptr;
  3560. ie->tag = __cpu_to_le32(WMI_IE_TAG);
  3561. ie->ie_len = __cpu_to_le32(arg->ie_len);
  3562. memcpy(ie->ie_data, arg->ie, arg->ie_len);
  3563. ptr += sizeof(*ie);
  3564. ptr += roundup(arg->ie_len, 4);
  3565. }
  3566. }
  3567. static struct sk_buff *
  3568. ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
  3569. const struct wmi_start_scan_arg *arg)
  3570. {
  3571. struct wmi_start_scan_cmd *cmd;
  3572. struct sk_buff *skb;
  3573. size_t len;
  3574. int ret;
  3575. ret = ath10k_wmi_start_scan_verify(arg);
  3576. if (ret)
  3577. return ERR_PTR(ret);
  3578. len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
  3579. skb = ath10k_wmi_alloc_skb(ar, len);
  3580. if (!skb)
  3581. return ERR_PTR(-ENOMEM);
  3582. cmd = (struct wmi_start_scan_cmd *)skb->data;
  3583. ath10k_wmi_put_start_scan_common(&cmd->common, arg);
  3584. ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
  3585. cmd->burst_duration_ms = __cpu_to_le32(0);
  3586. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
  3587. return skb;
  3588. }
  3589. static struct sk_buff *
  3590. ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
  3591. const struct wmi_start_scan_arg *arg)
  3592. {
  3593. struct wmi_10x_start_scan_cmd *cmd;
  3594. struct sk_buff *skb;
  3595. size_t len;
  3596. int ret;
  3597. ret = ath10k_wmi_start_scan_verify(arg);
  3598. if (ret)
  3599. return ERR_PTR(ret);
  3600. len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
  3601. skb = ath10k_wmi_alloc_skb(ar, len);
  3602. if (!skb)
  3603. return ERR_PTR(-ENOMEM);
  3604. cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
  3605. ath10k_wmi_put_start_scan_common(&cmd->common, arg);
  3606. ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
  3607. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
  3608. return skb;
  3609. }
  3610. void ath10k_wmi_start_scan_init(struct ath10k *ar,
  3611. struct wmi_start_scan_arg *arg)
  3612. {
  3613. /* setup commonly used values */
  3614. arg->scan_req_id = 1;
  3615. arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
  3616. arg->dwell_time_active = 50;
  3617. arg->dwell_time_passive = 150;
  3618. arg->min_rest_time = 50;
  3619. arg->max_rest_time = 500;
  3620. arg->repeat_probe_time = 0;
  3621. arg->probe_spacing_time = 0;
  3622. arg->idle_time = 0;
  3623. arg->max_scan_time = 20000;
  3624. arg->probe_delay = 5;
  3625. arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
  3626. | WMI_SCAN_EVENT_COMPLETED
  3627. | WMI_SCAN_EVENT_BSS_CHANNEL
  3628. | WMI_SCAN_EVENT_FOREIGN_CHANNEL
  3629. | WMI_SCAN_EVENT_DEQUEUED;
  3630. arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
  3631. arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
  3632. arg->n_bssids = 1;
  3633. arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
  3634. }
  3635. static struct sk_buff *
  3636. ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
  3637. const struct wmi_stop_scan_arg *arg)
  3638. {
  3639. struct wmi_stop_scan_cmd *cmd;
  3640. struct sk_buff *skb;
  3641. u32 scan_id;
  3642. u32 req_id;
  3643. if (arg->req_id > 0xFFF)
  3644. return ERR_PTR(-EINVAL);
  3645. if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
  3646. return ERR_PTR(-EINVAL);
  3647. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3648. if (!skb)
  3649. return ERR_PTR(-ENOMEM);
  3650. scan_id = arg->u.scan_id;
  3651. scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
  3652. req_id = arg->req_id;
  3653. req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
  3654. cmd = (struct wmi_stop_scan_cmd *)skb->data;
  3655. cmd->req_type = __cpu_to_le32(arg->req_type);
  3656. cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
  3657. cmd->scan_id = __cpu_to_le32(scan_id);
  3658. cmd->scan_req_id = __cpu_to_le32(req_id);
  3659. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3660. "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
  3661. arg->req_id, arg->req_type, arg->u.scan_id);
  3662. return skb;
  3663. }
  3664. static struct sk_buff *
  3665. ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
  3666. enum wmi_vdev_type type,
  3667. enum wmi_vdev_subtype subtype,
  3668. const u8 macaddr[ETH_ALEN])
  3669. {
  3670. struct wmi_vdev_create_cmd *cmd;
  3671. struct sk_buff *skb;
  3672. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3673. if (!skb)
  3674. return ERR_PTR(-ENOMEM);
  3675. cmd = (struct wmi_vdev_create_cmd *)skb->data;
  3676. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3677. cmd->vdev_type = __cpu_to_le32(type);
  3678. cmd->vdev_subtype = __cpu_to_le32(subtype);
  3679. ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
  3680. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3681. "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
  3682. vdev_id, type, subtype, macaddr);
  3683. return skb;
  3684. }
  3685. static struct sk_buff *
  3686. ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
  3687. {
  3688. struct wmi_vdev_delete_cmd *cmd;
  3689. struct sk_buff *skb;
  3690. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3691. if (!skb)
  3692. return ERR_PTR(-ENOMEM);
  3693. cmd = (struct wmi_vdev_delete_cmd *)skb->data;
  3694. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3695. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3696. "WMI vdev delete id %d\n", vdev_id);
  3697. return skb;
  3698. }
  3699. static struct sk_buff *
  3700. ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
  3701. const struct wmi_vdev_start_request_arg *arg,
  3702. bool restart)
  3703. {
  3704. struct wmi_vdev_start_request_cmd *cmd;
  3705. struct sk_buff *skb;
  3706. const char *cmdname;
  3707. u32 flags = 0;
  3708. if (WARN_ON(arg->ssid && arg->ssid_len == 0))
  3709. return ERR_PTR(-EINVAL);
  3710. if (WARN_ON(arg->hidden_ssid && !arg->ssid))
  3711. return ERR_PTR(-EINVAL);
  3712. if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
  3713. return ERR_PTR(-EINVAL);
  3714. if (restart)
  3715. cmdname = "restart";
  3716. else
  3717. cmdname = "start";
  3718. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3719. if (!skb)
  3720. return ERR_PTR(-ENOMEM);
  3721. if (arg->hidden_ssid)
  3722. flags |= WMI_VDEV_START_HIDDEN_SSID;
  3723. if (arg->pmf_enabled)
  3724. flags |= WMI_VDEV_START_PMF_ENABLED;
  3725. cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
  3726. cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  3727. cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
  3728. cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
  3729. cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
  3730. cmd->flags = __cpu_to_le32(flags);
  3731. cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
  3732. cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
  3733. if (arg->ssid) {
  3734. cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
  3735. memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
  3736. }
  3737. ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);
  3738. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3739. "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
  3740. cmdname, arg->vdev_id,
  3741. flags, arg->channel.freq, arg->channel.mode,
  3742. cmd->chan.flags, arg->channel.max_power);
  3743. return skb;
  3744. }
  3745. static struct sk_buff *
  3746. ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
  3747. {
  3748. struct wmi_vdev_stop_cmd *cmd;
  3749. struct sk_buff *skb;
  3750. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3751. if (!skb)
  3752. return ERR_PTR(-ENOMEM);
  3753. cmd = (struct wmi_vdev_stop_cmd *)skb->data;
  3754. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3755. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
  3756. return skb;
  3757. }
  3758. static struct sk_buff *
  3759. ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
  3760. const u8 *bssid)
  3761. {
  3762. struct wmi_vdev_up_cmd *cmd;
  3763. struct sk_buff *skb;
  3764. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3765. if (!skb)
  3766. return ERR_PTR(-ENOMEM);
  3767. cmd = (struct wmi_vdev_up_cmd *)skb->data;
  3768. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3769. cmd->vdev_assoc_id = __cpu_to_le32(aid);
  3770. ether_addr_copy(cmd->vdev_bssid.addr, bssid);
  3771. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3772. "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
  3773. vdev_id, aid, bssid);
  3774. return skb;
  3775. }
  3776. static struct sk_buff *
  3777. ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
  3778. {
  3779. struct wmi_vdev_down_cmd *cmd;
  3780. struct sk_buff *skb;
  3781. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3782. if (!skb)
  3783. return ERR_PTR(-ENOMEM);
  3784. cmd = (struct wmi_vdev_down_cmd *)skb->data;
  3785. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3786. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3787. "wmi mgmt vdev down id 0x%x\n", vdev_id);
  3788. return skb;
  3789. }
  3790. static struct sk_buff *
  3791. ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
  3792. u32 param_id, u32 param_value)
  3793. {
  3794. struct wmi_vdev_set_param_cmd *cmd;
  3795. struct sk_buff *skb;
  3796. if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
  3797. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3798. "vdev param %d not supported by firmware\n",
  3799. param_id);
  3800. return ERR_PTR(-EOPNOTSUPP);
  3801. }
  3802. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3803. if (!skb)
  3804. return ERR_PTR(-ENOMEM);
  3805. cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
  3806. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3807. cmd->param_id = __cpu_to_le32(param_id);
  3808. cmd->param_value = __cpu_to_le32(param_value);
  3809. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3810. "wmi vdev id 0x%x set param %d value %d\n",
  3811. vdev_id, param_id, param_value);
  3812. return skb;
  3813. }
  3814. static struct sk_buff *
  3815. ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
  3816. const struct wmi_vdev_install_key_arg *arg)
  3817. {
  3818. struct wmi_vdev_install_key_cmd *cmd;
  3819. struct sk_buff *skb;
  3820. if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
  3821. return ERR_PTR(-EINVAL);
  3822. if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
  3823. return ERR_PTR(-EINVAL);
  3824. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
  3825. if (!skb)
  3826. return ERR_PTR(-ENOMEM);
  3827. cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
  3828. cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  3829. cmd->key_idx = __cpu_to_le32(arg->key_idx);
  3830. cmd->key_flags = __cpu_to_le32(arg->key_flags);
  3831. cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
  3832. cmd->key_len = __cpu_to_le32(arg->key_len);
  3833. cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
  3834. cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
  3835. if (arg->macaddr)
  3836. ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
  3837. if (arg->key_data)
  3838. memcpy(cmd->key_data, arg->key_data, arg->key_len);
  3839. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3840. "wmi vdev install key idx %d cipher %d len %d\n",
  3841. arg->key_idx, arg->key_cipher, arg->key_len);
  3842. return skb;
  3843. }
  3844. static struct sk_buff *
  3845. ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
  3846. const struct wmi_vdev_spectral_conf_arg *arg)
  3847. {
  3848. struct wmi_vdev_spectral_conf_cmd *cmd;
  3849. struct sk_buff *skb;
  3850. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3851. if (!skb)
  3852. return ERR_PTR(-ENOMEM);
  3853. cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
  3854. cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  3855. cmd->scan_count = __cpu_to_le32(arg->scan_count);
  3856. cmd->scan_period = __cpu_to_le32(arg->scan_period);
  3857. cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
  3858. cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
  3859. cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
  3860. cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
  3861. cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
  3862. cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
  3863. cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
  3864. cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
  3865. cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
  3866. cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
  3867. cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
  3868. cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
  3869. cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
  3870. cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
  3871. cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
  3872. cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
  3873. return skb;
  3874. }
  3875. static struct sk_buff *
  3876. ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
  3877. u32 trigger, u32 enable)
  3878. {
  3879. struct wmi_vdev_spectral_enable_cmd *cmd;
  3880. struct sk_buff *skb;
  3881. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3882. if (!skb)
  3883. return ERR_PTR(-ENOMEM);
  3884. cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
  3885. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3886. cmd->trigger_cmd = __cpu_to_le32(trigger);
  3887. cmd->enable_cmd = __cpu_to_le32(enable);
  3888. return skb;
  3889. }
  3890. static struct sk_buff *
  3891. ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
  3892. const u8 peer_addr[ETH_ALEN])
  3893. {
  3894. struct wmi_peer_create_cmd *cmd;
  3895. struct sk_buff *skb;
  3896. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3897. if (!skb)
  3898. return ERR_PTR(-ENOMEM);
  3899. cmd = (struct wmi_peer_create_cmd *)skb->data;
  3900. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3901. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  3902. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3903. "wmi peer create vdev_id %d peer_addr %pM\n",
  3904. vdev_id, peer_addr);
  3905. return skb;
  3906. }
  3907. static struct sk_buff *
  3908. ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
  3909. const u8 peer_addr[ETH_ALEN])
  3910. {
  3911. struct wmi_peer_delete_cmd *cmd;
  3912. struct sk_buff *skb;
  3913. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3914. if (!skb)
  3915. return ERR_PTR(-ENOMEM);
  3916. cmd = (struct wmi_peer_delete_cmd *)skb->data;
  3917. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3918. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  3919. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3920. "wmi peer delete vdev_id %d peer_addr %pM\n",
  3921. vdev_id, peer_addr);
  3922. return skb;
  3923. }
  3924. static struct sk_buff *
  3925. ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
  3926. const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
  3927. {
  3928. struct wmi_peer_flush_tids_cmd *cmd;
  3929. struct sk_buff *skb;
  3930. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3931. if (!skb)
  3932. return ERR_PTR(-ENOMEM);
  3933. cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
  3934. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3935. cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
  3936. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  3937. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3938. "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
  3939. vdev_id, peer_addr, tid_bitmap);
  3940. return skb;
  3941. }
  3942. static struct sk_buff *
  3943. ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
  3944. const u8 *peer_addr,
  3945. enum wmi_peer_param param_id,
  3946. u32 param_value)
  3947. {
  3948. struct wmi_peer_set_param_cmd *cmd;
  3949. struct sk_buff *skb;
  3950. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3951. if (!skb)
  3952. return ERR_PTR(-ENOMEM);
  3953. cmd = (struct wmi_peer_set_param_cmd *)skb->data;
  3954. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3955. cmd->param_id = __cpu_to_le32(param_id);
  3956. cmd->param_value = __cpu_to_le32(param_value);
  3957. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  3958. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3959. "wmi vdev %d peer 0x%pM set param %d value %d\n",
  3960. vdev_id, peer_addr, param_id, param_value);
  3961. return skb;
  3962. }
  3963. static struct sk_buff *
  3964. ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
  3965. enum wmi_sta_ps_mode psmode)
  3966. {
  3967. struct wmi_sta_powersave_mode_cmd *cmd;
  3968. struct sk_buff *skb;
  3969. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3970. if (!skb)
  3971. return ERR_PTR(-ENOMEM);
  3972. cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
  3973. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3974. cmd->sta_ps_mode = __cpu_to_le32(psmode);
  3975. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3976. "wmi set powersave id 0x%x mode %d\n",
  3977. vdev_id, psmode);
  3978. return skb;
  3979. }
  3980. static struct sk_buff *
  3981. ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
  3982. enum wmi_sta_powersave_param param_id,
  3983. u32 value)
  3984. {
  3985. struct wmi_sta_powersave_param_cmd *cmd;
  3986. struct sk_buff *skb;
  3987. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3988. if (!skb)
  3989. return ERR_PTR(-ENOMEM);
  3990. cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
  3991. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3992. cmd->param_id = __cpu_to_le32(param_id);
  3993. cmd->param_value = __cpu_to_le32(value);
  3994. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3995. "wmi sta ps param vdev_id 0x%x param %d value %d\n",
  3996. vdev_id, param_id, value);
  3997. return skb;
  3998. }
  3999. static struct sk_buff *
  4000. ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  4001. enum wmi_ap_ps_peer_param param_id, u32 value)
  4002. {
  4003. struct wmi_ap_ps_peer_cmd *cmd;
  4004. struct sk_buff *skb;
  4005. if (!mac)
  4006. return ERR_PTR(-EINVAL);
  4007. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  4008. if (!skb)
  4009. return ERR_PTR(-ENOMEM);
  4010. cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
  4011. cmd->vdev_id = __cpu_to_le32(vdev_id);
  4012. cmd->param_id = __cpu_to_le32(param_id);
  4013. cmd->param_value = __cpu_to_le32(value);
  4014. ether_addr_copy(cmd->peer_macaddr.addr, mac);
  4015. ath10k_dbg(ar, ATH10K_DBG_WMI,
  4016. "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
  4017. vdev_id, param_id, value, mac);
  4018. return skb;
  4019. }
  4020. static struct sk_buff *
  4021. ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
  4022. const struct wmi_scan_chan_list_arg *arg)
  4023. {
  4024. struct wmi_scan_chan_list_cmd *cmd;
  4025. struct sk_buff *skb;
  4026. struct wmi_channel_arg *ch;
  4027. struct wmi_channel *ci;
  4028. int len;
  4029. int i;
  4030. len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
  4031. skb = ath10k_wmi_alloc_skb(ar, len);
  4032. if (!skb)
  4033. return ERR_PTR(-EINVAL);
  4034. cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
  4035. cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
  4036. for (i = 0; i < arg->n_channels; i++) {
  4037. ch = &arg->channels[i];
  4038. ci = &cmd->chan_info[i];
  4039. ath10k_wmi_put_wmi_channel(ci, ch);
  4040. }
  4041. return skb;
  4042. }
  4043. static void
  4044. ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
  4045. const struct wmi_peer_assoc_complete_arg *arg)
  4046. {
  4047. struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
  4048. cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  4049. cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
  4050. cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
  4051. cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
  4052. cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
  4053. cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
  4054. cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
  4055. cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
  4056. cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
  4057. cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
  4058. cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
  4059. cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
  4060. cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);
  4061. ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
  4062. cmd->peer_legacy_rates.num_rates =
  4063. __cpu_to_le32(arg->peer_legacy_rates.num_rates);
  4064. memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
  4065. arg->peer_legacy_rates.num_rates);
  4066. cmd->peer_ht_rates.num_rates =
  4067. __cpu_to_le32(arg->peer_ht_rates.num_rates);
  4068. memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
  4069. arg->peer_ht_rates.num_rates);
  4070. cmd->peer_vht_rates.rx_max_rate =
  4071. __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
  4072. cmd->peer_vht_rates.rx_mcs_set =
  4073. __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
  4074. cmd->peer_vht_rates.tx_max_rate =
  4075. __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
  4076. cmd->peer_vht_rates.tx_mcs_set =
  4077. __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
  4078. }
  4079. static void
  4080. ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
  4081. const struct wmi_peer_assoc_complete_arg *arg)
  4082. {
  4083. struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
  4084. ath10k_wmi_peer_assoc_fill(ar, buf, arg);
  4085. memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
  4086. }
  4087. static void
  4088. ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
  4089. const struct wmi_peer_assoc_complete_arg *arg)
  4090. {
  4091. ath10k_wmi_peer_assoc_fill(ar, buf, arg);
  4092. }
  4093. static void
  4094. ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
  4095. const struct wmi_peer_assoc_complete_arg *arg)
  4096. {
  4097. struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
  4098. int max_mcs, max_nss;
  4099. u32 info0;
  4100. /* TODO: Is using max values okay with firmware? */
  4101. max_mcs = 0xf;
  4102. max_nss = 0xf;
  4103. info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
  4104. SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
  4105. ath10k_wmi_peer_assoc_fill(ar, buf, arg);
  4106. cmd->info0 = __cpu_to_le32(info0);
  4107. }
  4108. static int
  4109. ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
  4110. {
  4111. if (arg->peer_mpdu_density > 16)
  4112. return -EINVAL;
  4113. if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
  4114. return -EINVAL;
  4115. if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
  4116. return -EINVAL;
  4117. return 0;
  4118. }
  4119. static struct sk_buff *
  4120. ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
  4121. const struct wmi_peer_assoc_complete_arg *arg)
  4122. {
  4123. size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
  4124. struct sk_buff *skb;
  4125. int ret;
  4126. ret = ath10k_wmi_peer_assoc_check_arg(arg);
  4127. if (ret)
  4128. return ERR_PTR(ret);
  4129. skb = ath10k_wmi_alloc_skb(ar, len);
  4130. if (!skb)
  4131. return ERR_PTR(-ENOMEM);
  4132. ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
  4133. ath10k_dbg(ar, ATH10K_DBG_WMI,
  4134. "wmi peer assoc vdev %d addr %pM (%s)\n",
  4135. arg->vdev_id, arg->addr,
  4136. arg->peer_reassoc ? "reassociate" : "new");
  4137. return skb;
  4138. }
  4139. static struct sk_buff *
  4140. ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
  4141. const struct wmi_peer_assoc_complete_arg *arg)
  4142. {
  4143. size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
  4144. struct sk_buff *skb;
  4145. int ret;
  4146. ret = ath10k_wmi_peer_assoc_check_arg(arg);
  4147. if (ret)
  4148. return ERR_PTR(ret);
  4149. skb = ath10k_wmi_alloc_skb(ar, len);
  4150. if (!skb)
  4151. return ERR_PTR(-ENOMEM);
  4152. ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
  4153. ath10k_dbg(ar, ATH10K_DBG_WMI,
  4154. "wmi peer assoc vdev %d addr %pM (%s)\n",
  4155. arg->vdev_id, arg->addr,
  4156. arg->peer_reassoc ? "reassociate" : "new");
  4157. return skb;
  4158. }
  4159. static struct sk_buff *
  4160. ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
  4161. const struct wmi_peer_assoc_complete_arg *arg)
  4162. {
  4163. size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
  4164. struct sk_buff *skb;
  4165. int ret;
  4166. ret = ath10k_wmi_peer_assoc_check_arg(arg);
  4167. if (ret)
  4168. return ERR_PTR(ret);
  4169. skb = ath10k_wmi_alloc_skb(ar, len);
  4170. if (!skb)
  4171. return ERR_PTR(-ENOMEM);
  4172. ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
  4173. ath10k_dbg(ar, ATH10K_DBG_WMI,
  4174. "wmi peer assoc vdev %d addr %pM (%s)\n",
  4175. arg->vdev_id, arg->addr,
  4176. arg->peer_reassoc ? "reassociate" : "new");
  4177. return skb;
  4178. }
  4179. static struct sk_buff *
  4180. ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
  4181. {
  4182. struct sk_buff *skb;
  4183. skb = ath10k_wmi_alloc_skb(ar, 0);
  4184. if (!skb)
  4185. return ERR_PTR(-ENOMEM);
  4186. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
  4187. return skb;
  4188. }
  4189. /* This function assumes the beacon is already DMA mapped */
  4190. static struct sk_buff *
  4191. ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
  4192. size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
  4193. bool deliver_cab)
  4194. {
  4195. struct wmi_bcn_tx_ref_cmd *cmd;
  4196. struct sk_buff *skb;
  4197. struct ieee80211_hdr *hdr;
  4198. u16 fc;
  4199. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  4200. if (!skb)
  4201. return ERR_PTR(-ENOMEM);
  4202. hdr = (struct ieee80211_hdr *)bcn;
  4203. fc = le16_to_cpu(hdr->frame_control);
  4204. cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
  4205. cmd->vdev_id = __cpu_to_le32(vdev_id);
  4206. cmd->data_len = __cpu_to_le32(bcn_len);
  4207. cmd->data_ptr = __cpu_to_le32(bcn_paddr);
  4208. cmd->msdu_id = 0;
  4209. cmd->frame_control = __cpu_to_le32(fc);
  4210. cmd->flags = 0;
  4211. cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
  4212. if (dtim_zero)
  4213. cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
  4214. if (deliver_cab)
  4215. cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
  4216. return skb;
  4217. }
  4218. void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
  4219. const struct wmi_wmm_params_arg *arg)
  4220. {
  4221. params->cwmin = __cpu_to_le32(arg->cwmin);
  4222. params->cwmax = __cpu_to_le32(arg->cwmax);
  4223. params->aifs = __cpu_to_le32(arg->aifs);
  4224. params->txop = __cpu_to_le32(arg->txop);
  4225. params->acm = __cpu_to_le32(arg->acm);
  4226. params->no_ack = __cpu_to_le32(arg->no_ack);
  4227. }
  4228. static struct sk_buff *
  4229. ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
  4230. const struct wmi_wmm_params_all_arg *arg)
  4231. {
  4232. struct wmi_pdev_set_wmm_params *cmd;
  4233. struct sk_buff *skb;
  4234. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  4235. if (!skb)
  4236. return ERR_PTR(-ENOMEM);
  4237. cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
  4238. ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
  4239. ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
  4240. ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
  4241. ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
  4242. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
  4243. return skb;
  4244. }
  4245. static struct sk_buff *
  4246. ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
  4247. {
  4248. struct wmi_request_stats_cmd *cmd;
  4249. struct sk_buff *skb;
  4250. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  4251. if (!skb)
  4252. return ERR_PTR(-ENOMEM);
  4253. cmd = (struct wmi_request_stats_cmd *)skb->data;
  4254. cmd->stats_id = __cpu_to_le32(stats_mask);
  4255. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
  4256. stats_mask);
  4257. return skb;
  4258. }
  4259. static struct sk_buff *
  4260. ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
  4261. enum wmi_force_fw_hang_type type, u32 delay_ms)
  4262. {
  4263. struct wmi_force_fw_hang_cmd *cmd;
  4264. struct sk_buff *skb;
  4265. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  4266. if (!skb)
  4267. return ERR_PTR(-ENOMEM);
  4268. cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
  4269. cmd->type = __cpu_to_le32(type);
  4270. cmd->delay_ms = __cpu_to_le32(delay_ms);
  4271. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
  4272. type, delay_ms);
  4273. return skb;
  4274. }
  4275. static struct sk_buff *
  4276. ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
  4277. u32 log_level)
  4278. {
  4279. struct wmi_dbglog_cfg_cmd *cmd;
  4280. struct sk_buff *skb;
  4281. u32 cfg;
  4282. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  4283. if (!skb)
  4284. return ERR_PTR(-ENOMEM);
  4285. cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
  4286. if (module_enable) {
  4287. cfg = SM(log_level,
  4288. ATH10K_DBGLOG_CFG_LOG_LVL);
  4289. } else {
  4290. /* set back defaults, all modules with WARN level */
  4291. cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
  4292. ATH10K_DBGLOG_CFG_LOG_LVL);
  4293. module_enable = ~0;
  4294. }
  4295. cmd->module_enable = __cpu_to_le32(module_enable);
  4296. cmd->module_valid = __cpu_to_le32(~0);
  4297. cmd->config_enable = __cpu_to_le32(cfg);
  4298. cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
  4299. ath10k_dbg(ar, ATH10K_DBG_WMI,
  4300. "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
  4301. __le32_to_cpu(cmd->module_enable),
  4302. __le32_to_cpu(cmd->module_valid),
  4303. __le32_to_cpu(cmd->config_enable),
  4304. __le32_to_cpu(cmd->config_valid));
  4305. return skb;
  4306. }
  4307. static struct sk_buff *
  4308. ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
  4309. {
  4310. struct wmi_pdev_pktlog_enable_cmd *cmd;
  4311. struct sk_buff *skb;
  4312. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  4313. if (!skb)
  4314. return ERR_PTR(-ENOMEM);
  4315. ev_bitmap &= ATH10K_PKTLOG_ANY;
  4316. cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
  4317. cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
  4318. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
  4319. ev_bitmap);
  4320. return skb;
  4321. }
  4322. static struct sk_buff *
  4323. ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
  4324. {
  4325. struct sk_buff *skb;
  4326. skb = ath10k_wmi_alloc_skb(ar, 0);
  4327. if (!skb)
  4328. return ERR_PTR(-ENOMEM);
  4329. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
  4330. return skb;
  4331. }
  4332. static struct sk_buff *
  4333. ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
  4334. u32 duration, u32 next_offset,
  4335. u32 enabled)
  4336. {
  4337. struct wmi_pdev_set_quiet_cmd *cmd;
  4338. struct sk_buff *skb;
  4339. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  4340. if (!skb)
  4341. return ERR_PTR(-ENOMEM);
  4342. cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
  4343. cmd->period = __cpu_to_le32(period);
  4344. cmd->duration = __cpu_to_le32(duration);
  4345. cmd->next_start = __cpu_to_le32(next_offset);
  4346. cmd->enabled = __cpu_to_le32(enabled);
  4347. ath10k_dbg(ar, ATH10K_DBG_WMI,
  4348. "wmi quiet param: period %u duration %u enabled %d\n",
  4349. period, duration, enabled);
  4350. return skb;
  4351. }
  4352. static struct sk_buff *
  4353. ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
  4354. const u8 *mac)
  4355. {
  4356. struct wmi_addba_clear_resp_cmd *cmd;
  4357. struct sk_buff *skb;
  4358. if (!mac)
  4359. return ERR_PTR(-EINVAL);
  4360. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  4361. if (!skb)
  4362. return ERR_PTR(-ENOMEM);
  4363. cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
  4364. cmd->vdev_id = __cpu_to_le32(vdev_id);
  4365. ether_addr_copy(cmd->peer_macaddr.addr, mac);
  4366. ath10k_dbg(ar, ATH10K_DBG_WMI,
  4367. "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
  4368. vdev_id, mac);
  4369. return skb;
  4370. }
  4371. static struct sk_buff *
  4372. ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  4373. u32 tid, u32 buf_size)
  4374. {
  4375. struct wmi_addba_send_cmd *cmd;
  4376. struct sk_buff *skb;
  4377. if (!mac)
  4378. return ERR_PTR(-EINVAL);
  4379. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  4380. if (!skb)
  4381. return ERR_PTR(-ENOMEM);
  4382. cmd = (struct wmi_addba_send_cmd *)skb->data;
  4383. cmd->vdev_id = __cpu_to_le32(vdev_id);
  4384. ether_addr_copy(cmd->peer_macaddr.addr, mac);
  4385. cmd->tid = __cpu_to_le32(tid);
  4386. cmd->buffersize = __cpu_to_le32(buf_size);
  4387. ath10k_dbg(ar, ATH10K_DBG_WMI,
  4388. "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
  4389. vdev_id, mac, tid, buf_size);
  4390. return skb;
  4391. }
  4392. static struct sk_buff *
  4393. ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  4394. u32 tid, u32 status)
  4395. {
  4396. struct wmi_addba_setresponse_cmd *cmd;
  4397. struct sk_buff *skb;
  4398. if (!mac)
  4399. return ERR_PTR(-EINVAL);
  4400. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  4401. if (!skb)
  4402. return ERR_PTR(-ENOMEM);
  4403. cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
  4404. cmd->vdev_id = __cpu_to_le32(vdev_id);
  4405. ether_addr_copy(cmd->peer_macaddr.addr, mac);
  4406. cmd->tid = __cpu_to_le32(tid);
  4407. cmd->statuscode = __cpu_to_le32(status);
  4408. ath10k_dbg(ar, ATH10K_DBG_WMI,
  4409. "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
  4410. vdev_id, mac, tid, status);
  4411. return skb;
  4412. }
  4413. static struct sk_buff *
  4414. ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  4415. u32 tid, u32 initiator, u32 reason)
  4416. {
  4417. struct wmi_delba_send_cmd *cmd;
  4418. struct sk_buff *skb;
  4419. if (!mac)
  4420. return ERR_PTR(-EINVAL);
  4421. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  4422. if (!skb)
  4423. return ERR_PTR(-ENOMEM);
  4424. cmd = (struct wmi_delba_send_cmd *)skb->data;
  4425. cmd->vdev_id = __cpu_to_le32(vdev_id);
  4426. ether_addr_copy(cmd->peer_macaddr.addr, mac);
  4427. cmd->tid = __cpu_to_le32(tid);
  4428. cmd->initiator = __cpu_to_le32(initiator);
  4429. cmd->reasoncode = __cpu_to_le32(reason);
  4430. ath10k_dbg(ar, ATH10K_DBG_WMI,
  4431. "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
  4432. vdev_id, mac, tid, initiator, reason);
  4433. return skb;
  4434. }
  4435. static const struct wmi_ops wmi_ops = {
  4436. .rx = ath10k_wmi_op_rx,
  4437. .map_svc = wmi_main_svc_map,
  4438. .pull_scan = ath10k_wmi_op_pull_scan_ev,
  4439. .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
  4440. .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
  4441. .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
  4442. .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
  4443. .pull_swba = ath10k_wmi_op_pull_swba_ev,
  4444. .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
  4445. .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
  4446. .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
  4447. .pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
  4448. .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
  4449. .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
  4450. .gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
  4451. .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
  4452. .gen_init = ath10k_wmi_op_gen_init,
  4453. .gen_start_scan = ath10k_wmi_op_gen_start_scan,
  4454. .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
  4455. .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
  4456. .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
  4457. .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
  4458. .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
  4459. .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
  4460. .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
  4461. .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
  4462. .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
  4463. .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
  4464. .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
  4465. /* .gen_vdev_wmm_conf not implemented */
  4466. .gen_peer_create = ath10k_wmi_op_gen_peer_create,
  4467. .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
  4468. .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
  4469. .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
  4470. .gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
  4471. .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
  4472. .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
  4473. .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
  4474. .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
  4475. .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
  4476. .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
  4477. .gen_request_stats = ath10k_wmi_op_gen_request_stats,
  4478. .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
  4479. .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
  4480. .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
  4481. .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
  4482. .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
  4483. .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
  4484. /* .gen_pdev_get_temperature not implemented */
  4485. .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
  4486. .gen_addba_send = ath10k_wmi_op_gen_addba_send,
  4487. .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
  4488. .gen_delba_send = ath10k_wmi_op_gen_delba_send,
  4489. /* .gen_bcn_tmpl not implemented */
  4490. /* .gen_prb_tmpl not implemented */
  4491. /* .gen_p2p_go_bcn_ie not implemented */
  4492. };
  4493. static const struct wmi_ops wmi_10_1_ops = {
  4494. .rx = ath10k_wmi_10_1_op_rx,
  4495. .map_svc = wmi_10x_svc_map,
  4496. .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
  4497. .pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
  4498. .gen_init = ath10k_wmi_10_1_op_gen_init,
  4499. .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
  4500. .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
  4501. .gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
  4502. /* .gen_pdev_get_temperature not implemented */
  4503. /* shared with main branch */
  4504. .pull_scan = ath10k_wmi_op_pull_scan_ev,
  4505. .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
  4506. .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
  4507. .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
  4508. .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
  4509. .pull_swba = ath10k_wmi_op_pull_swba_ev,
  4510. .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
  4511. .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
  4512. .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
  4513. .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
  4514. .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
  4515. .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
  4516. .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
  4517. .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
  4518. .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
  4519. .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
  4520. .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
  4521. .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
  4522. .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
  4523. .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
  4524. .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
  4525. .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
  4526. /* .gen_vdev_wmm_conf not implemented */
  4527. .gen_peer_create = ath10k_wmi_op_gen_peer_create,
  4528. .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
  4529. .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
  4530. .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
  4531. .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
  4532. .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
  4533. .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
  4534. .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
  4535. .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
  4536. .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
  4537. .gen_request_stats = ath10k_wmi_op_gen_request_stats,
  4538. .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
  4539. .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
  4540. .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
  4541. .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
  4542. .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
  4543. .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
  4544. .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
  4545. .gen_addba_send = ath10k_wmi_op_gen_addba_send,
  4546. .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
  4547. .gen_delba_send = ath10k_wmi_op_gen_delba_send,
  4548. /* .gen_bcn_tmpl not implemented */
  4549. /* .gen_prb_tmpl not implemented */
  4550. /* .gen_p2p_go_bcn_ie not implemented */
  4551. };
  4552. static const struct wmi_ops wmi_10_2_ops = {
  4553. .rx = ath10k_wmi_10_2_op_rx,
  4554. .pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
  4555. .gen_init = ath10k_wmi_10_2_op_gen_init,
  4556. .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
  4557. /* .gen_pdev_get_temperature not implemented */
  4558. /* shared with 10.1 */
  4559. .map_svc = wmi_10x_svc_map,
  4560. .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
  4561. .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
  4562. .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
  4563. .pull_scan = ath10k_wmi_op_pull_scan_ev,
  4564. .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
  4565. .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
  4566. .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
  4567. .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
  4568. .pull_swba = ath10k_wmi_op_pull_swba_ev,
  4569. .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
  4570. .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
  4571. .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
  4572. .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
  4573. .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
  4574. .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
  4575. .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
  4576. .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
  4577. .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
  4578. .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
  4579. .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
  4580. .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
  4581. .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
  4582. .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
  4583. .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
  4584. .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
  4585. /* .gen_vdev_wmm_conf not implemented */
  4586. .gen_peer_create = ath10k_wmi_op_gen_peer_create,
  4587. .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
  4588. .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
  4589. .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
  4590. .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
  4591. .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
  4592. .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
  4593. .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
  4594. .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
  4595. .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
  4596. .gen_request_stats = ath10k_wmi_op_gen_request_stats,
  4597. .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
  4598. .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
  4599. .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
  4600. .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
  4601. .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
  4602. .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
  4603. .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
  4604. .gen_addba_send = ath10k_wmi_op_gen_addba_send,
  4605. .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
  4606. .gen_delba_send = ath10k_wmi_op_gen_delba_send,
  4607. };
  4608. static const struct wmi_ops wmi_10_2_4_ops = {
  4609. .rx = ath10k_wmi_10_2_op_rx,
  4610. .pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
  4611. .gen_init = ath10k_wmi_10_2_op_gen_init,
  4612. .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
  4613. .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
  4614. /* shared with 10.1 */
  4615. .map_svc = wmi_10x_svc_map,
  4616. .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
  4617. .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
  4618. .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
  4619. .pull_scan = ath10k_wmi_op_pull_scan_ev,
  4620. .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
  4621. .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
  4622. .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
  4623. .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
  4624. .pull_swba = ath10k_wmi_op_pull_swba_ev,
  4625. .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
  4626. .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
  4627. .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
  4628. .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
  4629. .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
  4630. .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
  4631. .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
  4632. .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
  4633. .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
  4634. .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
  4635. .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
  4636. .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
  4637. .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
  4638. .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
  4639. .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
  4640. .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
  4641. .gen_peer_create = ath10k_wmi_op_gen_peer_create,
  4642. .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
  4643. .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
  4644. .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
  4645. .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
  4646. .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
  4647. .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
  4648. .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
  4649. .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
  4650. .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
  4651. .gen_request_stats = ath10k_wmi_op_gen_request_stats,
  4652. .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
  4653. .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
  4654. .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
  4655. .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
  4656. .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
  4657. .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
  4658. .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
  4659. .gen_addba_send = ath10k_wmi_op_gen_addba_send,
  4660. .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
  4661. .gen_delba_send = ath10k_wmi_op_gen_delba_send,
  4662. /* .gen_bcn_tmpl not implemented */
  4663. /* .gen_prb_tmpl not implemented */
  4664. /* .gen_p2p_go_bcn_ie not implemented */
  4665. };
  4666. int ath10k_wmi_attach(struct ath10k *ar)
  4667. {
  4668. switch (ar->wmi.op_version) {
  4669. case ATH10K_FW_WMI_OP_VERSION_10_2_4:
  4670. ar->wmi.cmd = &wmi_10_2_4_cmd_map;
  4671. ar->wmi.ops = &wmi_10_2_4_ops;
  4672. ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
  4673. ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
  4674. break;
  4675. case ATH10K_FW_WMI_OP_VERSION_10_2:
  4676. ar->wmi.cmd = &wmi_10_2_cmd_map;
  4677. ar->wmi.ops = &wmi_10_2_ops;
  4678. ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
  4679. ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
  4680. break;
  4681. case ATH10K_FW_WMI_OP_VERSION_10_1:
  4682. ar->wmi.cmd = &wmi_10x_cmd_map;
  4683. ar->wmi.ops = &wmi_10_1_ops;
  4684. ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
  4685. ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
  4686. break;
  4687. case ATH10K_FW_WMI_OP_VERSION_MAIN:
  4688. ar->wmi.cmd = &wmi_cmd_map;
  4689. ar->wmi.ops = &wmi_ops;
  4690. ar->wmi.vdev_param = &wmi_vdev_param_map;
  4691. ar->wmi.pdev_param = &wmi_pdev_param_map;
  4692. break;
  4693. case ATH10K_FW_WMI_OP_VERSION_TLV:
  4694. ath10k_wmi_tlv_attach(ar);
  4695. break;
  4696. case ATH10K_FW_WMI_OP_VERSION_UNSET:
  4697. case ATH10K_FW_WMI_OP_VERSION_MAX:
  4698. ath10k_err(ar, "unsupported WMI op version: %d\n",
  4699. ar->wmi.op_version);
  4700. return -EINVAL;
  4701. }
  4702. init_completion(&ar->wmi.service_ready);
  4703. init_completion(&ar->wmi.unified_ready);
  4704. return 0;
  4705. }
  4706. void ath10k_wmi_detach(struct ath10k *ar)
  4707. {
  4708. int i;
  4709. /* free the host memory chunks requested by firmware */
  4710. for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
  4711. dma_free_coherent(ar->dev,
  4712. ar->wmi.mem_chunks[i].len,
  4713. ar->wmi.mem_chunks[i].vaddr,
  4714. ar->wmi.mem_chunks[i].paddr);
  4715. }
  4716. ar->wmi.num_mem_chunks = 0;
  4717. }