wmi.c 137 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
742784279428042814282428342844285428642874288428942904291429242934294429542964297429842994300430143024303430443054306430743084309431043114312431343144315431643174318431943204321432243234324432543264327432843294330433143324333433443354336433743384339434043414342434343444345434643474348434943504351435243534354435543564357435843594360436143624363436443654366436743684369437043714372437343744375437643774378437943804381438243834384438543864387438843894390439143924393439443954396439743984399440044014402440344044405440644074408440944104411441244134414441544164417441844194420442144224423442444254426442744284429443044314432443344344435443644374438443944404441444244434444444544464447444844494450445144524453445444554456445744584459446044614462446344644465446644674468
  1. /*
  2. * Copyright (c) 2005-2011 Atheros Communications Inc.
  3. * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. */
  17. #include <linux/skbuff.h>
  18. #include <linux/ctype.h>
  19. #include "core.h"
  20. #include "htc.h"
  21. #include "debug.h"
  22. #include "wmi.h"
  23. #include "mac.h"
  24. #include "testmode.h"
  25. /* MAIN WMI cmd track */
  26. static struct wmi_cmd_map wmi_cmd_map = {
  27. .init_cmdid = WMI_INIT_CMDID,
  28. .start_scan_cmdid = WMI_START_SCAN_CMDID,
  29. .stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
  30. .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
  31. .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
  32. .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
  33. .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
  34. .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
  35. .pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
  36. .pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
  37. .pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
  38. .pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
  39. .pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
  40. .pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
  41. .pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
  42. .pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
  43. .pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
  44. .pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
  45. .vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
  46. .vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
  47. .vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
  48. .vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
  49. .vdev_up_cmdid = WMI_VDEV_UP_CMDID,
  50. .vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
  51. .vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
  52. .vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
  53. .vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
  54. .peer_create_cmdid = WMI_PEER_CREATE_CMDID,
  55. .peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
  56. .peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
  57. .peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
  58. .peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
  59. .peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
  60. .peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
  61. .peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
  62. .bcn_tx_cmdid = WMI_BCN_TX_CMDID,
  63. .pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
  64. .bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
  65. .bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
  66. .prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
  67. .mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
  68. .prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
  69. .addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
  70. .addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
  71. .addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
  72. .delba_send_cmdid = WMI_DELBA_SEND_CMDID,
  73. .addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
  74. .send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
  75. .sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
  76. .sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
  77. .sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
  78. .pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
  79. .pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
  80. .roam_scan_mode = WMI_ROAM_SCAN_MODE,
  81. .roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
  82. .roam_scan_period = WMI_ROAM_SCAN_PERIOD,
  83. .roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
  84. .roam_ap_profile = WMI_ROAM_AP_PROFILE,
  85. .ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
  86. .ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
  87. .ofl_scan_period = WMI_OFL_SCAN_PERIOD,
  88. .p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
  89. .p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
  90. .p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
  91. .p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
  92. .p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
  93. .ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
  94. .ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
  95. .peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
  96. .wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
  97. .wlan_profile_set_hist_intvl_cmdid =
  98. WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
  99. .wlan_profile_get_profile_data_cmdid =
  100. WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
  101. .wlan_profile_enable_profile_id_cmdid =
  102. WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
  103. .wlan_profile_list_profile_id_cmdid =
  104. WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
  105. .pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
  106. .pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
  107. .add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
  108. .rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
  109. .wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
  110. .wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
  111. .wow_enable_disable_wake_event_cmdid =
  112. WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
  113. .wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
  114. .wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
  115. .rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
  116. .rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
  117. .vdev_spectral_scan_configure_cmdid =
  118. WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
  119. .vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
  120. .request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
  121. .set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
  122. .network_list_offload_config_cmdid =
  123. WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
  124. .gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
  125. .csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
  126. .csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
  127. .chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
  128. .peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
  129. .peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
  130. .sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
  131. .sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
  132. .sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
  133. .echo_cmdid = WMI_ECHO_CMDID,
  134. .pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
  135. .dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
  136. .pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
  137. .pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
  138. .vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
  139. .vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
  140. .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
  141. .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
  142. .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
  143. };
/*
 * 10.X WMI cmd track: maps the driver's abstract command IDs onto the
 * numbering used by 10.X-branch firmware. Fields set to
 * WMI_CMD_UNSUPPORTED mark commands this firmware branch does not
 * implement; callers must check for that sentinel before sending.
 */
static struct wmi_cmd_map wmi_10x_cmd_map = {
	.init_cmdid = WMI_10X_INIT_CMDID,
	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
	/* 10.X firmware has no beacon/probe template commands */
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
	/* host offload features below are absent from 10.X firmware */
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10X_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
};
/* MAIN WMI VDEV param map
 *
 * Translates the driver's abstract per-vdev parameter IDs into the
 * values used by the "main" firmware branch.  Entries the firmware does
 * not implement are marked WMI_VDEV_PARAM_UNSUPPORTED.
 */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
			WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_VDEV_PARAM_WDS,
	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_VDEV_PARAM_SGI,
	.ldpc = WMI_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
	/* mcast-to-ucast conversion only has a mapping in the 10.X map */
	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
			WMI_VDEV_PARAM_UNSUPPORTED,
};
/* 10.X WMI VDEV param map
 *
 * Same abstract vdev parameters as wmi_vdev_param_map, but using the
 * 10.X firmware numbering.  Parameters this firmware branch lacks
 * (bmiss first/final counts, txbf, packet powersave, drop_unencry,
 * tx_encap_type) are marked WMI_VDEV_PARAM_UNSUPPORTED.
 */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
			WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
			WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
			WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
			WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
			WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
};
  381. static struct wmi_pdev_param_map wmi_pdev_param_map = {
  382. .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
  383. .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
  384. .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
  385. .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
  386. .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
  387. .beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
  388. .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
  389. .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
  390. .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
  391. .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
  392. .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
  393. .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
  394. .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
  395. .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
  396. .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
  397. .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
  398. .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
  399. .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
  400. .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
  401. .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
  402. .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
  403. .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
  404. .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
  405. .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
  406. .dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
  407. .pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
  408. .pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
  409. .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
  410. .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
  411. .pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
  412. .vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
  413. .peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
  414. .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
  415. .pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
  416. .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
  417. .dcs = WMI_PDEV_PARAM_DCS,
  418. .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
  419. .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
  420. .ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
  421. .ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
  422. .ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
  423. .dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
  424. .proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
  425. .idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
  426. .power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
  427. .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
  428. .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
  429. .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
  430. };
/* 10.X WMI PDEV param map
 *
 * Same abstract pdev parameters as wmi_pdev_param_map, but using the
 * 10.X firmware numbering.  PCIe low-power TX buffer knobs, proxy-STA,
 * idle PS and power-gating sleep have no 10.X equivalent and are
 * marked WMI_PDEV_PARAM_UNSUPPORTED.
 */
static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
			WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
	/* 10.X uses a combined ARP+DHCP AC override parameter */
	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
	.dcs = WMI_10X_PDEV_PARAM_DCS,
	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
};
/* firmware 10.2 specific mappings
 *
 * Command-ID map for the 10.2 firmware branch: translates the driver's
 * abstract WMI command indices into the 10.2 on-the-wire command IDs.
 * Commands this branch does not implement are WMI_CMD_UNSUPPORTED and
 * are rejected by ath10k_wmi_cmd_send() before reaching the firmware.
 */
static struct wmi_cmd_map wmi_10_2_cmd_map = {
	.init_cmdid = WMI_10_2_INIT_CMDID,
	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
	/* Offload and keepalive features not present on this branch */
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_2_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
};
  602. static void
  603. ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
  604. const struct wmi_channel_arg *arg)
  605. {
  606. u32 flags = 0;
  607. memset(ch, 0, sizeof(*ch));
  608. if (arg->passive)
  609. flags |= WMI_CHAN_FLAG_PASSIVE;
  610. if (arg->allow_ibss)
  611. flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
  612. if (arg->allow_ht)
  613. flags |= WMI_CHAN_FLAG_ALLOW_HT;
  614. if (arg->allow_vht)
  615. flags |= WMI_CHAN_FLAG_ALLOW_VHT;
  616. if (arg->ht40plus)
  617. flags |= WMI_CHAN_FLAG_HT40_PLUS;
  618. if (arg->chan_radar)
  619. flags |= WMI_CHAN_FLAG_DFS;
  620. ch->mhz = __cpu_to_le32(arg->freq);
  621. ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
  622. ch->band_center_freq2 = 0;
  623. ch->min_power = arg->min_power;
  624. ch->max_power = arg->max_power;
  625. ch->reg_power = arg->max_reg_power;
  626. ch->antenna_max = arg->max_antenna_gain;
  627. /* mode & flags share storage */
  628. ch->mode = arg->mode;
  629. ch->flags |= __cpu_to_le32(flags);
  630. }
  631. int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
  632. {
  633. int ret;
  634. ret = wait_for_completion_timeout(&ar->wmi.service_ready,
  635. WMI_SERVICE_READY_TIMEOUT_HZ);
  636. return ret;
  637. }
  638. int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
  639. {
  640. int ret;
  641. ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
  642. WMI_UNIFIED_READY_TIMEOUT_HZ);
  643. return ret;
  644. }
  645. struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
  646. {
  647. struct sk_buff *skb;
  648. u32 round_len = roundup(len, 4);
  649. skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
  650. if (!skb)
  651. return NULL;
  652. skb_reserve(skb, WMI_SKB_HEADROOM);
  653. if (!IS_ALIGNED((unsigned long)skb->data, 4))
  654. ath10k_warn(ar, "Unaligned WMI skb\n");
  655. skb_put(skb, round_len);
  656. memset(skb->data, 0, round_len);
  657. return skb;
  658. }
/* HTC TX-completion callback for the WMI endpoint: HTC is done with the
 * command buffer, so simply free it.
 */
static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}
  663. static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
  664. u32 cmd_id)
  665. {
  666. struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
  667. struct wmi_cmd_hdr *cmd_hdr;
  668. int ret;
  669. u32 cmd = 0;
  670. if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
  671. return -ENOMEM;
  672. cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
  673. cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
  674. cmd_hdr->cmd_id = __cpu_to_le32(cmd);
  675. memset(skb_cb, 0, sizeof(*skb_cb));
  676. ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
  677. trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
  678. if (ret)
  679. goto err_pull;
  680. return 0;
  681. err_pull:
  682. skb_pull(skb, sizeof(struct wmi_cmd_hdr));
  683. return ret;
  684. }
  685. static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
  686. {
  687. int ret;
  688. lockdep_assert_held(&arvif->ar->data_lock);
  689. if (arvif->beacon == NULL)
  690. return;
  691. if (arvif->beacon_sent)
  692. return;
  693. ret = ath10k_wmi_beacon_send_ref_nowait(arvif);
  694. if (ret)
  695. return;
  696. /* We need to retain the arvif->beacon reference for DMA unmapping and
  697. * freeing the skbuff later. */
  698. arvif->beacon_sent = true;
  699. }
/* Interface-iterator callback: try to push out the pending beacon for
 * each active vif.  @data and @mac are unused.
 */
static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);

	ath10k_wmi_tx_beacon_nowait(arvif);
}
/* Walk all active interfaces under ar->data_lock and submit any beacons
 * that are queued but not yet handed to the firmware.
 */
static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
{
	spin_lock_bh(&ar->data_lock);
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   ath10k_wmi_tx_beacons_iter,
						   NULL);
	spin_unlock_bh(&ar->data_lock);
}
/* HTC credit-replenish callback: service pending beacons first, then
 * wake any sender blocked in ath10k_wmi_cmd_send() so it can retry.
 */
static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
{
	/* try to send pending beacons first. they take priority */
	ath10k_wmi_tx_beacons_nowait(ar);

	wake_up(&ar->wmi.tx_credits_wq);
}
/* Send a WMI command, sleeping (up to 3 s) for HTC TX credits when
 * necessary.  May sleep; not callable from atomic context.
 *
 * Return: 0 on success; -EOPNOTSUPP if @cmd_id is WMI_CMD_UNSUPPORTED
 * in the active firmware's command map; -ESHUTDOWN if the device
 * crashed while waiting; -EAGAIN if credits never became available
 * before the timeout; other negative errnos from the HTC send path.
 * On any failure the skb is freed here; on success it is consumed by
 * the transport.
 */
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
{
	int ret = -EOPNOTSUPP;

	might_sleep();

	if (cmd_id == WMI_CMD_UNSUPPORTED) {
		ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
			    cmd_id);
		return ret;
	}

	/* The wait condition is a GCC statement expression: each wake-up
	 * retries the send and terminates the wait unless the transport
	 * reported -EAGAIN (out of credits).
	 */
	wait_event_timeout(ar->wmi.tx_credits_wq, ({
		/* try to send pending beacons first. they take priority */
		ath10k_wmi_tx_beacons_nowait(ar);

		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);

		if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
			ret = -ESHUTDOWN;

		(ret != -EAGAIN);
	}), 3*HZ);

	if (ret)
		dev_kfree_skb_any(skb);

	return ret;
}
/* Transmit a management frame via the WMI management-TX command.
 *
 * The frame in @skb is copied into a freshly allocated WMI command
 * buffer.  For protected action/deauth/disassoc frames, room for the
 * CCMP MIC is reserved in both the buffer and the declared frame
 * length.
 *
 * Return: 0 on success, negative errno on failure.  On success the
 * frame is (for now) unconditionally acked back to mac80211.
 */
int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
{
	int ret = 0;
	struct wmi_mgmt_tx_cmd *cmd;
	struct ieee80211_hdr *hdr;
	struct sk_buff *wmi_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int len;
	u32 buf_len = skb->len;
	u16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
		return -EINVAL;

	len = sizeof(cmd->hdr) + skb->len;

	/* Reserve room for a trailing CCMP MIC on robust management
	 * frames that carry the protected bit.
	 */
	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		len += IEEE80211_CCMP_MIC_LEN;
		buf_len += IEEE80211_CCMP_MIC_LEN;
	}

	len = round_up(len, 4);

	wmi_skb = ath10k_wmi_alloc_skb(ar, len);
	if (!wmi_skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;

	cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
	cmd->hdr.tx_rate = 0;
	cmd->hdr.tx_power = 0;
	cmd->hdr.buf_len = __cpu_to_le32(buf_len);

	ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
	memcpy(cmd->buf, skb->data, skb->len);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
		   wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
		   fc & IEEE80211_FCTL_STYPE);
	trace_ath10k_tx_hdr(ar, skb->data, skb->len);
	trace_ath10k_tx_payload(ar, skb->data, skb->len);

	/* Send the management frame buffer to the target */
	ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;
	/* NOTE(review): @skb is not freed here on send failure —
	 * presumably the caller retains ownership until TX status is
	 * reported; confirm against the mgmt-tx caller.
	 */

	/* TODO: report tx status to mac80211 - temporary just ACK */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, skb);

	return ret;
}
  789. static void ath10k_wmi_event_scan_started(struct ath10k *ar)
  790. {
  791. lockdep_assert_held(&ar->data_lock);
  792. switch (ar->scan.state) {
  793. case ATH10K_SCAN_IDLE:
  794. case ATH10K_SCAN_RUNNING:
  795. case ATH10K_SCAN_ABORTING:
  796. ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
  797. ath10k_scan_state_str(ar->scan.state),
  798. ar->scan.state);
  799. break;
  800. case ATH10K_SCAN_STARTING:
  801. ar->scan.state = ATH10K_SCAN_RUNNING;
  802. if (ar->scan.is_roc)
  803. ieee80211_ready_on_channel(ar->hw);
  804. complete(&ar->scan.started);
  805. break;
  806. }
  807. }
/* Handle a "scan completed" event: finish the scan when RUNNING or
 * ABORTING, warn (and ignore) otherwise.  Must be called with
 * ar->data_lock held.
 */
static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_STARTING:
		/* One suspected reason scan can be completed while starting is
		 * if firmware fails to deliver all scan events to the host,
		 * e.g. when transport pipe is full. This has been observed
		 * with spectral scan phyerr events starving wmi transport
		 * pipe. In such case the "scan completed" event should be (and
		 * is) ignored by the host as it may be just firmware's scan
		 * state machine recovering.
		 */
		ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
			    ath10k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		__ath10k_scan_finish(ar);
		break;
	}
}
  832. static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
  833. {
  834. lockdep_assert_held(&ar->data_lock);
  835. switch (ar->scan.state) {
  836. case ATH10K_SCAN_IDLE:
  837. case ATH10K_SCAN_STARTING:
  838. ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
  839. ath10k_scan_state_str(ar->scan.state),
  840. ar->scan.state);
  841. break;
  842. case ATH10K_SCAN_RUNNING:
  843. case ATH10K_SCAN_ABORTING:
  844. ar->scan_channel = NULL;
  845. break;
  846. }
  847. }
  848. static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
  849. {
  850. lockdep_assert_held(&ar->data_lock);
  851. switch (ar->scan.state) {
  852. case ATH10K_SCAN_IDLE:
  853. case ATH10K_SCAN_STARTING:
  854. ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
  855. ath10k_scan_state_str(ar->scan.state),
  856. ar->scan.state);
  857. break;
  858. case ATH10K_SCAN_RUNNING:
  859. case ATH10K_SCAN_ABORTING:
  860. ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
  861. if (ar->scan.is_roc && ar->scan.roc_freq == freq)
  862. complete(&ar->scan.on_channel);
  863. break;
  864. }
  865. }
  866. static const char *
  867. ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
  868. enum wmi_scan_completion_reason reason)
  869. {
  870. switch (type) {
  871. case WMI_SCAN_EVENT_STARTED:
  872. return "started";
  873. case WMI_SCAN_EVENT_COMPLETED:
  874. switch (reason) {
  875. case WMI_SCAN_REASON_COMPLETED:
  876. return "completed";
  877. case WMI_SCAN_REASON_CANCELLED:
  878. return "completed [cancelled]";
  879. case WMI_SCAN_REASON_PREEMPTED:
  880. return "completed [preempted]";
  881. case WMI_SCAN_REASON_TIMEDOUT:
  882. return "completed [timedout]";
  883. case WMI_SCAN_REASON_MAX:
  884. break;
  885. }
  886. return "completed [unknown]";
  887. case WMI_SCAN_EVENT_BSS_CHANNEL:
  888. return "bss channel";
  889. case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
  890. return "foreign channel";
  891. case WMI_SCAN_EVENT_DEQUEUED:
  892. return "dequeued";
  893. case WMI_SCAN_EVENT_PREEMPTED:
  894. return "preempted";
  895. case WMI_SCAN_EVENT_START_FAILED:
  896. return "start failed";
  897. default:
  898. return "unknown";
  899. }
  900. }
/* Dispatch a WMI scan event to the per-type handlers under
 * ar->data_lock.  DEQUEUED and PREEMPTED events are logged but
 * otherwise ignored.  Always returns 0.
 */
static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
	enum wmi_scan_event_type event_type;
	enum wmi_scan_completion_reason reason;
	u32 freq;
	u32 req_id;
	u32 scan_id;
	u32 vdev_id;

	/* All event fields arrive little-endian from the firmware. */
	event_type = __le32_to_cpu(event->event_type);
	reason = __le32_to_cpu(event->reason);
	freq = __le32_to_cpu(event->channel_freq);
	req_id = __le32_to_cpu(event->scan_req_id);
	scan_id = __le32_to_cpu(event->scan_id);
	vdev_id = __le32_to_cpu(event->vdev_id);

	spin_lock_bh(&ar->data_lock);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
		   ath10k_wmi_event_scan_type_str(event_type, reason),
		   event_type, reason, freq, req_id, scan_id, vdev_id,
		   ath10k_scan_state_str(ar->scan.state), ar->scan.state);

	switch (event_type) {
	case WMI_SCAN_EVENT_STARTED:
		ath10k_wmi_event_scan_started(ar);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath10k_wmi_event_scan_completed(ar);
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath10k_wmi_event_scan_bss_chan(ar);
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
		ath10k_wmi_event_scan_foreign_chan(ar, freq);
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath10k_warn(ar, "received scan start failure event\n");
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
	case WMI_SCAN_EVENT_PREEMPTED:
	default:
		break;
	}

	spin_unlock_bh(&ar->data_lock);
	return 0;
}
  946. static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
  947. {
  948. enum ieee80211_band band;
  949. switch (phy_mode) {
  950. case MODE_11A:
  951. case MODE_11NA_HT20:
  952. case MODE_11NA_HT40:
  953. case MODE_11AC_VHT20:
  954. case MODE_11AC_VHT40:
  955. case MODE_11AC_VHT80:
  956. band = IEEE80211_BAND_5GHZ;
  957. break;
  958. case MODE_11G:
  959. case MODE_11B:
  960. case MODE_11GONLY:
  961. case MODE_11NG_HT20:
  962. case MODE_11NG_HT40:
  963. case MODE_11AC_VHT20_2G:
  964. case MODE_11AC_VHT40_2G:
  965. case MODE_11AC_VHT80_2G:
  966. default:
  967. band = IEEE80211_BAND_2GHZ;
  968. }
  969. return band;
  970. }
  971. static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
  972. {
  973. u8 rate_idx = 0;
  974. /* rate in Kbps */
  975. switch (rate) {
  976. case 1000:
  977. rate_idx = 0;
  978. break;
  979. case 2000:
  980. rate_idx = 1;
  981. break;
  982. case 5500:
  983. rate_idx = 2;
  984. break;
  985. case 11000:
  986. rate_idx = 3;
  987. break;
  988. case 6000:
  989. rate_idx = 4;
  990. break;
  991. case 9000:
  992. rate_idx = 5;
  993. break;
  994. case 12000:
  995. rate_idx = 6;
  996. break;
  997. case 18000:
  998. rate_idx = 7;
  999. break;
  1000. case 24000:
  1001. rate_idx = 8;
  1002. break;
  1003. case 36000:
  1004. rate_idx = 9;
  1005. break;
  1006. case 48000:
  1007. rate_idx = 10;
  1008. break;
  1009. case 54000:
  1010. rate_idx = 11;
  1011. break;
  1012. default:
  1013. break;
  1014. }
  1015. if (band == IEEE80211_BAND_5GHZ) {
  1016. if (rate_idx > 3)
  1017. /* Omit CCK rates */
  1018. rate_idx -= 4;
  1019. else
  1020. rate_idx = 0;
  1021. }
  1022. return rate_idx;
  1023. }
/* If keys are configured, HW decrypts all frames
 * with protected bit set. Mark such frames as decrypted.
 */
static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
					 struct sk_buff *skb,
					 struct ieee80211_rx_status *status)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	unsigned int hdrlen;
	bool peer_key;
	u8 *addr, keyidx;

	/* Only Authentication frames carrying the protected bit
	 * (WEP shared-key auth) are of interest here. */
	if (!ieee80211_is_auth(hdr->frame_control) ||
	    !ieee80211_has_protected(hdr->frame_control))
		return;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	/* frame must be long enough to carry the WEP IV after the header */
	if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
		return;

	/* the key index sits in the top bits of the last IV byte */
	keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
	addr = ieee80211_get_SA(hdr);

	spin_lock_bh(&ar->data_lock);
	peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
	spin_unlock_bh(&ar->data_lock);

	/* if a matching per-peer WEP key is installed, HW has already
	 * decrypted this frame — tell mac80211 so */
	if (peer_key) {
		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac wep key present for peer %pM\n", addr);
		status->flag |= RX_FLAG_DECRYPTED;
	}
}
/* WMI_MGMT_RX_EVENTID handler: strips the firmware event header,
 * translates the rx metadata into an ieee80211_rx_status and hands
 * the management frame to mac80211. Consumes @skb on every path.
 * Always returns 0.
 */
static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_mgmt_rx_event_v1 *ev_v1;
	struct wmi_mgmt_rx_event_v2 *ev_v2;
	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	u32 rx_status;
	u32 channel;
	u32 phy_mode;
	u32 snr;
	u32 rate;
	u32 buf_len;
	u16 fc;
	int pull_len;

	/* Firmware with the extended mgmt-rx feature prepends the larger
	 * v2 header; its v1 part is embedded so parsing is shared. */
	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
		ev_hdr = &ev_v2->hdr.v1;
		pull_len = sizeof(*ev_v2);
	} else {
		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
		ev_hdr = &ev_v1->hdr;
		pull_len = sizeof(*ev_v1);
	}

	channel = __le32_to_cpu(ev_hdr->channel);
	buf_len = __le32_to_cpu(ev_hdr->buf_len);
	rx_status = __le32_to_cpu(ev_hdr->status);
	snr = __le32_to_cpu(ev_hdr->snr);
	phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
	rate = __le32_to_cpu(ev_hdr->rate);

	memset(status, 0, sizeof(*status));

	ath10k_dbg(ar, ATH10K_DBG_MGMT,
		   "event mgmt rx status %08x\n", rx_status);

	/* drop everything while a channel availability check runs */
	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		dev_kfree_skb(skb);
		return 0;
	}

	/* drop frames the hardware flagged as broken */
	if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_CRC) {
		dev_kfree_skb(skb);
		return 0;
	}

	/* MIC failures are reported to mac80211 (TKIP countermeasures)
	 * instead of being dropped */
	if (rx_status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
	 * MODE_11B. This means phy_mode is not a reliable source for the band
	 * of mgmt rx.
	 */
	if (channel >= 1 && channel <= 14) {
		status->band = IEEE80211_BAND_2GHZ;
	} else if (channel >= 36 && channel <= 165) {
		status->band = IEEE80211_BAND_5GHZ;
	} else {
		/* Shouldn't happen unless list of advertised channels to
		 * mac80211 has been changed.
		 */
		WARN_ON_ONCE(1);
		dev_kfree_skb(skb);
		return 0;
	}

	if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
		ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");

	status->freq = ieee80211_channel_to_frequency(channel, status->band);
	/* snr is reported relative to the noise floor */
	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
	status->rate_idx = get_rate_idx(rate, status->band);

	skb_pull(skb, pull_len);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	ath10k_wmi_handle_wep_reauth(ar, skb, status);

	/* FW delivers WEP Shared Auth frame with Protected Bit set and
	 * encrypted payload. However in case of PMF it delivers decrypted
	 * frames with Protected Bit set. */
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !ieee80211_is_auth(hdr->frame_control)) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (!ieee80211_is_action(hdr->frame_control) &&
		    !ieee80211_is_deauth(hdr->frame_control) &&
		    !ieee80211_is_disassoc(hdr->frame_control)) {
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
			/* clear the protected bit so mac80211 doesn't try
			 * to decrypt again */
			hdr->frame_control = __cpu_to_le16(fc &
					~IEEE80211_FCTL_PROTECTED);
		}
	}

	ath10k_dbg(ar, ATH10K_DBG_MGMT,
		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath10k_dbg(ar, ATH10K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	/*
	 * packets from HTC come aligned to 4byte boundaries
	 * because they can originally come in along with a trailer
	 */
	skb_trim(skb, buf_len);

	ieee80211_rx(ar->hw, skb);
	return 0;
}
  1159. static int freq_to_idx(struct ath10k *ar, int freq)
  1160. {
  1161. struct ieee80211_supported_band *sband;
  1162. int band, ch, idx = 0;
  1163. for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
  1164. sband = ar->hw->wiphy->bands[band];
  1165. if (!sband)
  1166. continue;
  1167. for (ch = 0; ch < sband->n_channels; ch++, idx++)
  1168. if (sband->channels[ch].center_freq == freq)
  1169. goto exit;
  1170. }
  1171. exit:
  1172. return idx;
  1173. }
/* WMI_CHAN_INFO_EVENTID handler: updates the per-channel survey
 * (channel usage) data gathered while scanning. Events received
 * outside of a running scan are ignored with a warning.
 */
static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_chan_info_event *ev;
	struct survey_info *survey;
	u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
	int idx;

	ev = (struct wmi_chan_info_event *)skb->data;

	err_code = __le32_to_cpu(ev->err_code);
	freq = __le32_to_cpu(ev->freq);
	cmd_flags = __le32_to_cpu(ev->cmd_flags);
	noise_floor = __le32_to_cpu(ev->noise_floor);
	rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
	cycle_count = __le32_to_cpu(ev->cycle_count);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
		   err_code, freq, cmd_flags, noise_floor, rx_clear_count,
		   cycle_count);

	/* data_lock protects ar->scan.state and the survey arrays */
	spin_lock_bh(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH10K_SCAN_IDLE:
	case ATH10K_SCAN_STARTING:
		ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
		goto exit;
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		break;
	}

	idx = freq_to_idx(ar, freq);
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
			    freq, idx);
		goto exit;
	}

	if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
		/* During scanning chan info is reported twice for each
		 * visited channel. The reported cycle count is global
		 * and per-channel cycle count must be calculated */
		cycle_count -= ar->survey_last_cycle_count;
		rx_clear_count -= ar->survey_last_rx_clear_count;

		survey = &ar->survey[idx];
		survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count);
		survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
		survey->noise = noise_floor;
		survey->filled = SURVEY_INFO_CHANNEL_TIME |
				 SURVEY_INFO_CHANNEL_TIME_RX |
				 SURVEY_INFO_NOISE_DBM;
	}

	/* remember the global counters so the next COMPLETE report can
	 * derive the per-channel delta */
	ar->survey_last_rx_clear_count = rx_clear_count;
	ar->survey_last_cycle_count = cycle_count;

exit:
	spin_unlock_bh(&ar->data_lock);
}
/* WMI_ECHO_EVENTID handler: firmware's reply to an echo command.
 * Only logged here; no completion is signalled. */
static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
}
/* WMI_DEBUG_MESG_EVENTID handler: forwards the raw firmware debug-log
 * payload to the ath10k tracing infrastructure. Always returns 0. */
static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
		   skb->len);

	trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);

	return 0;
}
/* Copy one firmware pdev-stats record into the host representation,
 * converting every field from little-endian wire format to CPU order.
 */
static void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src,
				       struct ath10k_fw_stats_pdev *dst)
{
	const struct wal_dbg_tx_stats *tx = &src->wal.tx;
	const struct wal_dbg_rx_stats *rx = &src->wal.rx;

	/* general pdev counters */
	dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
	dst->cycle_count = __le32_to_cpu(src->cycle_count);
	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);

	/* WAL TX debug counters */
	dst->comp_queued = __le32_to_cpu(tx->comp_queued);
	dst->comp_delivered = __le32_to_cpu(tx->comp_delivered);
	dst->msdu_enqued = __le32_to_cpu(tx->msdu_enqued);
	dst->mpdu_enqued = __le32_to_cpu(tx->mpdu_enqued);
	dst->wmm_drop = __le32_to_cpu(tx->wmm_drop);
	dst->local_enqued = __le32_to_cpu(tx->local_enqued);
	dst->local_freed = __le32_to_cpu(tx->local_freed);
	dst->hw_queued = __le32_to_cpu(tx->hw_queued);
	dst->hw_reaped = __le32_to_cpu(tx->hw_reaped);
	dst->underrun = __le32_to_cpu(tx->underrun);
	dst->tx_abort = __le32_to_cpu(tx->tx_abort);
	dst->mpdus_requed = __le32_to_cpu(tx->mpdus_requed);
	dst->tx_ko = __le32_to_cpu(tx->tx_ko);
	dst->data_rc = __le32_to_cpu(tx->data_rc);
	dst->self_triggers = __le32_to_cpu(tx->self_triggers);
	dst->sw_retry_failure = __le32_to_cpu(tx->sw_retry_failure);
	dst->illgl_rate_phy_err = __le32_to_cpu(tx->illgl_rate_phy_err);
	dst->pdev_cont_xretry = __le32_to_cpu(tx->pdev_cont_xretry);
	dst->pdev_tx_timeout = __le32_to_cpu(tx->pdev_tx_timeout);
	dst->pdev_resets = __le32_to_cpu(tx->pdev_resets);
	dst->phy_underrun = __le32_to_cpu(tx->phy_underrun);
	dst->txop_ovf = __le32_to_cpu(tx->txop_ovf);

	/* WAL RX debug counters */
	dst->mid_ppdu_route_change = __le32_to_cpu(rx->mid_ppdu_route_change);
	dst->status_rcvd = __le32_to_cpu(rx->status_rcvd);
	dst->r0_frags = __le32_to_cpu(rx->r0_frags);
	dst->r1_frags = __le32_to_cpu(rx->r1_frags);
	dst->r2_frags = __le32_to_cpu(rx->r2_frags);
	dst->r3_frags = __le32_to_cpu(rx->r3_frags);
	dst->htt_msdus = __le32_to_cpu(rx->htt_msdus);
	dst->htt_mpdus = __le32_to_cpu(rx->htt_mpdus);
	dst->loc_msdus = __le32_to_cpu(rx->loc_msdus);
	dst->loc_mpdus = __le32_to_cpu(rx->loc_mpdus);
	dst->oversize_amsdu = __le32_to_cpu(rx->oversize_amsdu);
	dst->phy_errs = __le32_to_cpu(rx->phy_errs);
	dst->phy_err_drop = __le32_to_cpu(rx->phy_err_drop);
	dst->mpdu_errs = __le32_to_cpu(rx->mpdu_errs);
}
/* Copy one firmware peer-stats record into the host representation,
 * converting the wire fields to CPU byte order. */
static void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
				       struct ath10k_fw_stats_peer *dst)
{
	ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
}
  1293. static int ath10k_wmi_main_pull_fw_stats(struct ath10k *ar,
  1294. struct sk_buff *skb,
  1295. struct ath10k_fw_stats *stats)
  1296. {
  1297. const struct wmi_stats_event *ev = (void *)skb->data;
  1298. u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
  1299. int i;
  1300. if (!skb_pull(skb, sizeof(*ev)))
  1301. return -EPROTO;
  1302. num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
  1303. num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
  1304. num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
  1305. for (i = 0; i < num_pdev_stats; i++) {
  1306. const struct wmi_pdev_stats *src;
  1307. struct ath10k_fw_stats_pdev *dst;
  1308. src = (void *)skb->data;
  1309. if (!skb_pull(skb, sizeof(*src)))
  1310. return -EPROTO;
  1311. dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  1312. if (!dst)
  1313. continue;
  1314. ath10k_wmi_pull_pdev_stats(src, dst);
  1315. list_add_tail(&dst->list, &stats->pdevs);
  1316. }
  1317. /* fw doesn't implement vdev stats */
  1318. for (i = 0; i < num_peer_stats; i++) {
  1319. const struct wmi_peer_stats *src;
  1320. struct ath10k_fw_stats_peer *dst;
  1321. src = (void *)skb->data;
  1322. if (!skb_pull(skb, sizeof(*src)))
  1323. return -EPROTO;
  1324. dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
  1325. if (!dst)
  1326. continue;
  1327. ath10k_wmi_pull_peer_stats(src, dst);
  1328. list_add_tail(&dst->list, &stats->peers);
  1329. }
  1330. return 0;
  1331. }
/* Parse a WMI_STATS_EVENTID payload in the 10.x firmware layout.
 * Like the main-firmware variant but the 10.x records embed the old
 * structures plus extra counters (ack/rts/fcs, peer rx rate).
 * Returns 0 on success or -EPROTO if the event is truncated.
 */
static int ath10k_wmi_10x_pull_fw_stats(struct ath10k *ar,
					struct sk_buff *skb,
					struct ath10k_fw_stats *stats)
{
	const struct wmi_stats_event *ev = (void *)skb->data;
	u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
	int i;

	if (!skb_pull(skb, sizeof(*ev)))
		return -EPROTO;

	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);

	for (i = 0; i < num_pdev_stats; i++) {
		const struct wmi_10x_pdev_stats *src;
		struct ath10k_fw_stats_pdev *dst;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		/* allocation failure skips the record; GFP_ATOMIC context */
		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		/* common part shares the main-firmware layout */
		ath10k_wmi_pull_pdev_stats(&src->old, dst);

		/* 10.x-only pdev counters */
		dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
		dst->rts_bad = __le32_to_cpu(src->rts_bad);
		dst->rts_good = __le32_to_cpu(src->rts_good);
		dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
		dst->no_beacons = __le32_to_cpu(src->no_beacons);
		dst->mib_int_count = __le32_to_cpu(src->mib_int_count);

		list_add_tail(&dst->list, &stats->pdevs);
	}

	/* fw doesn't implement vdev stats */

	for (i = 0; i < num_peer_stats; i++) {
		const struct wmi_10x_peer_stats *src;
		struct ath10k_fw_stats_peer *dst;

		src = (void *)skb->data;
		if (!skb_pull(skb, sizeof(*src)))
			return -EPROTO;

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_peer_stats(&src->old, dst);

		/* 10.x-only peer counter */
		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);

		list_add_tail(&dst->list, &stats->peers);
	}

	return 0;
}
  1378. int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
  1379. struct ath10k_fw_stats *stats)
  1380. {
  1381. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
  1382. return ath10k_wmi_10x_pull_fw_stats(ar, skb, stats);
  1383. else
  1384. return ath10k_wmi_main_pull_fw_stats(ar, skb, stats);
  1385. }
/* WMI_UPDATE_STATS_EVENTID handler: hands the raw stats event to the
 * debugfs stats machinery for parsing and caching. */
static void ath10k_wmi_event_update_stats(struct ath10k *ar,
					  struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
	ath10k_debug_fw_stats_process(ar, skb);
}
  1392. static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
  1393. struct sk_buff *skb)
  1394. {
  1395. struct wmi_vdev_start_response_event *ev;
  1396. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
  1397. ev = (struct wmi_vdev_start_response_event *)skb->data;
  1398. if (WARN_ON(__le32_to_cpu(ev->status)))
  1399. return;
  1400. complete(&ar->vdev_setup_done);
  1401. }
/* WMI_VDEV_STOPPED_EVENTID handler: wakes the thread waiting for a
 * vdev stop to finish. */
static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
					  struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
	complete(&ar->vdev_setup_done);
}
  1408. static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
  1409. struct sk_buff *skb)
  1410. {
  1411. struct wmi_peer_sta_kickout_event *ev;
  1412. struct ieee80211_sta *sta;
  1413. ev = (struct wmi_peer_sta_kickout_event *)skb->data;
  1414. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
  1415. ev->peer_macaddr.addr);
  1416. rcu_read_lock();
  1417. sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL);
  1418. if (!sta) {
  1419. ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
  1420. ev->peer_macaddr.addr);
  1421. goto exit;
  1422. }
  1423. ieee80211_report_low_ack(sta, 10);
  1424. exit:
  1425. rcu_read_unlock();
  1426. }
  1427. /*
  1428. * FIXME
  1429. *
  1430. * We don't report to mac80211 sleep state of connected
  1431. * stations. Due to this mac80211 can't fill in TIM IE
  1432. * correctly.
  1433. *
  1434. * I know of no way of getting nullfunc frames that contain
  1435. * sleep transition from connected stations - these do not
  1436. * seem to be sent from the target to the host. There also
  1437. * doesn't seem to be a dedicated event for that. So the
  1438. * only way left to do this would be to read tim_bitmap
  1439. * during SWBA.
  1440. *
  1441. * We could probably try using tim_bitmap from SWBA to tell
  1442. * mac80211 which stations are asleep and which are not. The
  1443. * problem here is calling mac80211 functions so many times
  1444. * could take too long and make us miss the time to submit
  1445. * the beacon to the target.
  1446. *
  1447. * So as a workaround we try to extend the TIM IE if there
  1448. * is unicast buffered for stations with aid > 7 and fill it
  1449. * in ourselves.
  1450. */
/* Rewrite the TIM IE of a host-generated beacon in place using the
 * firmware-provided tim_info from the SWBA event, expanding the
 * partial virtual bitmap when needed (see FIXME above: mac80211 cannot
 * fill the TIM itself because sleep state is not reported to it).
 */
static void ath10k_wmi_update_tim(struct ath10k *ar,
				  struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
	struct ieee80211_tim_ie *tim;
	u8 *ies, *ie;
	u8 ie_len, pvm_len;
	__le32 t;
	u32 v;

	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
	 * we must copy the bitmap upon change and reuse it later */
	if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
		int i;

		BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
			     sizeof(bcn_info->tim_info.tim_bitmap));

		/* unpack the little-endian u32 bitmap words into bytes */
		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
			t = bcn_info->tim_info.tim_bitmap[i / 4];
			v = __le32_to_cpu(t);
			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
		}

		/* FW reports either length 0 or 16
		 * so we calculate this on our own */
		arvif->u.ap.tim_len = 0;
		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
			if (arvif->u.ap.tim_bitmap[i])
				arvif->u.ap.tim_len = i;

		arvif->u.ap.tim_len++;
	}

	/* locate the TIM IE: skip 802.11 header and the fixed beacon
	 * parameters (timestamp + beacon interval + capability = 12 bytes) */
	ies = bcn->data;
	ies += ieee80211_hdrlen(hdr->frame_control);
	ies += 12; /* fixed parameters */

	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
				    (u8 *)skb_tail_pointer(bcn) - ies);
	if (!ie) {
		/* IBSS beacons legitimately have no TIM IE */
		if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
			ath10k_warn(ar, "no tim ie found;\n");
		return;
	}

	tim = (void *)ie + 2;
	ie_len = ie[1];
	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */

	/* expand the IE in place if the current partial virtual bitmap is
	 * too short for the firmware-reported TIM length */
	if (pvm_len < arvif->u.ap.tim_len) {
		int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
		void *next_ie = ie + 2 + ie_len;

		/* grow the skb tail, then shift the trailing IEs right to
		 * open a gap inside this IE */
		if (skb_put(bcn, expand_size)) {
			memmove(next_ie + expand_size, next_ie, move_size);

			ie[1] += expand_size;
			ie_len += expand_size;
			pvm_len += expand_size;
		} else {
			ath10k_warn(ar, "tim expansion failed\n");
		}
	}

	if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
		ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
		return;
	}

	/* bit 0 of bitmap_ctrl signals buffered multicast traffic */
	tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);

	if (tim->dtim_count == 0) {
		ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;

		if (__le32_to_cpu(bcn_info->tim_info.tim_mcast) == 1)
			ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
	}

	ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
		   tim->dtim_count, tim->dtim_period,
		   tim->bitmap_ctrl, pvm_len);
}
/* Serialize firmware NoA (Notice of Absence) info into a P2P vendor IE
 * at @data. @len is the total IE length as previously computed by
 * ath10k_p2p_calc_noa_ie_len(); the caller guarantees the buffer fits.
 */
static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
				   struct wmi_p2p_noa_info *noa)
{
	struct ieee80211_p2p_noa_attr *noa_attr;
	u8 ctwindow_oppps = noa->ctwindow_oppps;
	/* ctwindow and the opp-PS enable flag are packed in one byte */
	u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
	bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
	__le16 *noa_attr_len;
	u16 attr_len;
	u8 noa_descriptors = noa->num_descriptors;
	int i;

	/* P2P IE */
	data[0] = WLAN_EID_VENDOR_SPECIFIC;
	data[1] = len - 2;	/* IE length excludes EID + length bytes */
	data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
	data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
	data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
	data[5] = WLAN_OUI_TYPE_WFA_P2P;

	/* NOA ATTR */
	data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
	noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
	noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];

	noa_attr->index = noa->index;
	noa_attr->oppps_ctwindow = ctwindow;
	if (oppps)
		noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;

	for (i = 0; i < noa_descriptors; i++) {
		/* count is converted to CPU order; duration/interval/
		 * start_time keep the firmware's little-endian encoding */
		noa_attr->desc[i].count =
			__le32_to_cpu(noa->descriptors[i].type_count);
		noa_attr->desc[i].duration = noa->descriptors[i].duration;
		noa_attr->desc[i].interval = noa->descriptors[i].interval;
		noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
	}

	attr_len = 2; /* index + oppps_ctwindow */
	attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
	*noa_attr_len = __cpu_to_le16(attr_len);
}
  1559. static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
  1560. {
  1561. u32 len = 0;
  1562. u8 noa_descriptors = noa->num_descriptors;
  1563. u8 opp_ps_info = noa->ctwindow_oppps;
  1564. bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
  1565. if (!noa_descriptors && !opps_enabled)
  1566. return len;
  1567. len += 1 + 1 + 4; /* EID + len + OUI */
  1568. len += 1 + 2; /* noa attr + attr len */
  1569. len += 1 + 1; /* index + oppps_ctwindow */
  1570. len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
  1571. return len;
  1572. }
/* Rebuild the cached P2P NoA IE when the SWBA event reports a change
 * and append the cached IE to the beacon. Only relevant for P2P GO
 * vdevs; no-op otherwise.
 */
static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
	u8 *new_data, *old_data = arvif->u.ap.noa_data;
	u32 new_len;

	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
		return;

	ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
		new_len = ath10k_p2p_calc_noa_ie_len(noa);
		/* zero length means nothing to advertise anymore */
		if (!new_len)
			goto cleanup;

		new_data = kmalloc(new_len, GFP_ATOMIC);
		if (!new_data)
			goto cleanup;

		ath10k_p2p_fill_noa_ie(new_data, new_len, noa);

		/* swap the cached IE under data_lock; the old buffer is
		 * freed only after the swap */
		spin_lock_bh(&ar->data_lock);
		arvif->u.ap.noa_data = new_data;
		arvif->u.ap.noa_len = new_len;
		spin_unlock_bh(&ar->data_lock);
		kfree(old_data);
	}

	/* append the (possibly just-updated) cached NoA IE to the beacon */
	if (arvif->u.ap.noa_data)
		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
			memcpy(skb_put(bcn, arvif->u.ap.noa_len),
			       arvif->u.ap.noa_data,
			       arvif->u.ap.noa_len);
	return;

cleanup:
	/* drop the cached IE (calc failed, shrank to zero, or OOM) */
	spin_lock_bh(&ar->data_lock);
	arvif->u.ap.noa_data = NULL;
	arvif->u.ap.noa_len = 0;
	spin_unlock_bh(&ar->data_lock);
	kfree(old_data);
}
/* WMI_HOST_SWBA_EVENTID handler: for every vdev flagged in the SWBA
 * vdev_map, build the next beacon (TIM + NoA patched in), map it for
 * DMA (or copy into the preallocated beacon buffer) and submit it to
 * the target.
 */
static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_host_swba_event *ev;
	u32 map;
	int i = -1;
	struct wmi_bcn_info *bcn_info;
	struct ath10k_vif *arvif;
	struct sk_buff *bcn;
	dma_addr_t paddr;
	int ret, vdev_id = 0;

	ev = (struct wmi_host_swba_event *)skb->data;
	map = __le32_to_cpu(ev->vdev_map);

	ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
		   ev->vdev_map);

	/* walk the set bits of vdev_map; bit position == vdev_id, and the
	 * n-th set bit indexes ev->bcn_info[n] */
	for (; map; map >>= 1, vdev_id++) {
		if (!(map & 0x1))
			continue;

		i++;

		if (i >= WMI_MAX_AP_VDEV) {
			ath10k_warn(ar, "swba has corrupted vdev map\n");
			break;
		}

		bcn_info = &ev->bcn_info[i];

		ath10k_dbg(ar, ATH10K_DBG_MGMT,
			   "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
			   i,
			   __le32_to_cpu(bcn_info->tim_info.tim_len),
			   __le32_to_cpu(bcn_info->tim_info.tim_mcast),
			   __le32_to_cpu(bcn_info->tim_info.tim_changed),
			   __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));

		arvif = ath10k_get_arvif(ar, vdev_id);
		if (arvif == NULL) {
			ath10k_warn(ar, "no vif for vdev_id %d found\n",
				    vdev_id);
			continue;
		}

		/* There are no completions for beacons so wait for next SWBA
		 * before telling mac80211 to decrement CSA counter
		 *
		 * Once CSA counter is completed stop sending beacons until
		 * actual channel switch is done */
		if (arvif->vif->csa_active &&
		    ieee80211_csa_is_complete(arvif->vif)) {
			ieee80211_csa_finish(arvif->vif);
			continue;
		}

		bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
		if (!bcn) {
			ath10k_warn(ar, "could not get mac80211 beacon\n");
			continue;
		}

		ath10k_tx_h_seq_no(arvif->vif, bcn);
		ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
		ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);

		spin_lock_bh(&ar->data_lock);

		/* a previous beacon still pending means the target fell
		 * behind; free it before queueing the new one */
		if (arvif->beacon) {
			if (!arvif->beacon_sent)
				ath10k_warn(ar, "SWBA overrun on vdev %d\n",
					    arvif->vdev_id);

			ath10k_mac_vif_beacon_free(arvif);
		}

		if (!arvif->beacon_buf) {
			/* stream-map the fresh skb for device access */
			paddr = dma_map_single(arvif->ar->dev, bcn->data,
					       bcn->len, DMA_TO_DEVICE);
			ret = dma_mapping_error(arvif->ar->dev, paddr);
			if (ret) {
				ath10k_warn(ar, "failed to map beacon: %d\n",
					    ret);
				dev_kfree_skb_any(bcn);
				goto skip;
			}

			ATH10K_SKB_CB(bcn)->paddr = paddr;
		} else {
			/* copy into the preallocated DMA beacon buffer,
			 * trimming oversized beacons to fit */
			if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
				ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
					    bcn->len, IEEE80211_MAX_FRAME_LEN);
				skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
			}
			memcpy(arvif->beacon_buf, bcn->data, bcn->len);
			ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
		}

		arvif->beacon = bcn;
		arvif->beacon_sent = false;

		trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
		trace_ath10k_tx_payload(ar, bcn->data, bcn->len);

		ath10k_wmi_tx_beacon_nowait(arvif);
skip:
		spin_unlock_bh(&ar->data_lock);
	}
}
/* WMI_TBTTOFFSET_UPDATE_EVENTID handler: TBTT offset changes are
 * currently only logged, not acted upon. */
static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
}
/* Feed one hardware radar pulse report into the DFS pattern detector
 * and, if a radar pattern completes, notify mac80211 (unless radar
 * reporting is blocked via debugfs).
 */
static void ath10k_dfs_radar_report(struct ath10k *ar,
				    const struct wmi_phyerr *phyerr,
				    const struct phyerr_radar_report *rr,
				    u64 tsf)
{
	u32 reg0, reg1, tsf32l;
	struct pulse_event pe;
	u64 tsf64;
	u8 rssi, width;

	reg0 = __le32_to_cpu(rr->reg0);
	reg1 = __le32_to_cpu(rr->reg1);

	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
		   MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));

	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
		   MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));

	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
		   MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));

	if (!ar->dfs_detector)
		return;

	/* report event to DFS pattern detector */
	/* merge the per-event 32-bit timestamp into the upper half of
	 * the 64-bit TSF passed in by the caller */
	tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
	tsf64 = tsf & (~0xFFFFFFFFULL);
	tsf64 |= tsf32l;

	width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
	rssi = phyerr->rssi_combined;

	/* hardware store this as 8 bit signed value,
	 * set to zero if negative number
	 */
	if (rssi & 0x80)
		rssi = 0;

	pe.ts = tsf64;
	pe.freq = ar->hw->conf.chandef.chan->center_freq;
	pe.width = width;
	pe.rssi = rssi;

	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
		   pe.freq, pe.width, pe.rssi, pe.ts);

	ATH10K_DFS_STAT_INC(ar, pulses_detected);

	/* add_pulse() returns true only when the accumulated pulses
	 * match a known radar pattern */
	if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
			   "dfs no pulse pattern detected, yet\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
	ATH10K_DFS_STAT_INC(ar, radar_detected);

	/* Control radar events reporting in debugfs file
	   dfs_block_radar_events */
	if (ar->dfs_block_radar_events) {
		ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
		return;
	}

	ieee80211_radar_detected(ar->hw);
}
/* Inspect the search-FFT report attached to a radar phyerr and reject
 * likely false pulses (low rssi combined with low FFT peak magnitude).
 * Returns 0 if the pulse looks genuine, -EINVAL to discard it.
 */
static int ath10k_dfs_fft_report(struct ath10k *ar,
				 const struct wmi_phyerr *phyerr,
				 const struct phyerr_fft_report *fftr,
				 u64 tsf)
{
	u32 reg0, reg1;
	u8 rssi, peak_mag;

	reg0 = __le32_to_cpu(fftr->reg0);
	reg1 = __le32_to_cpu(fftr->reg1);
	rssi = phyerr->rssi_combined;

	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
		   MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
		   MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
		   MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
		   MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));

	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
		   MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
		   MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
		   MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
		   MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));

	peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);

	/* false event detection */
	if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
	    peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
		ATH10K_DFS_STAT_INC(ar, pulses_discarded);
		return -EINVAL;
	}

	return 0;
}
  1805. static void ath10k_wmi_event_dfs(struct ath10k *ar,
  1806. const struct wmi_phyerr *phyerr,
  1807. u64 tsf)
  1808. {
  1809. int buf_len, tlv_len, res, i = 0;
  1810. const struct phyerr_tlv *tlv;
  1811. const struct phyerr_radar_report *rr;
  1812. const struct phyerr_fft_report *fftr;
  1813. const u8 *tlv_buf;
  1814. buf_len = __le32_to_cpu(phyerr->buf_len);
  1815. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  1816. "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
  1817. phyerr->phy_err_code, phyerr->rssi_combined,
  1818. __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len);
  1819. /* Skip event if DFS disabled */
  1820. if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
  1821. return;
  1822. ATH10K_DFS_STAT_INC(ar, pulses_total);
  1823. while (i < buf_len) {
  1824. if (i + sizeof(*tlv) > buf_len) {
  1825. ath10k_warn(ar, "too short buf for tlv header (%d)\n",
  1826. i);
  1827. return;
  1828. }
  1829. tlv = (struct phyerr_tlv *)&phyerr->buf[i];
  1830. tlv_len = __le16_to_cpu(tlv->len);
  1831. tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
  1832. ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
  1833. "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
  1834. tlv_len, tlv->tag, tlv->sig);
  1835. switch (tlv->tag) {
  1836. case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
  1837. if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
  1838. ath10k_warn(ar, "too short radar pulse summary (%d)\n",
  1839. i);
  1840. return;
  1841. }
  1842. rr = (struct phyerr_radar_report *)tlv_buf;
  1843. ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
  1844. break;
  1845. case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
  1846. if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
  1847. ath10k_warn(ar, "too short fft report (%d)\n",
  1848. i);
  1849. return;
  1850. }
  1851. fftr = (struct phyerr_fft_report *)tlv_buf;
  1852. res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
  1853. if (res)
  1854. return;
  1855. break;
  1856. }
  1857. i += sizeof(*tlv) + tlv_len;
  1858. }
  1859. }
  1860. static void
  1861. ath10k_wmi_event_spectral_scan(struct ath10k *ar,
  1862. const struct wmi_phyerr *phyerr,
  1863. u64 tsf)
  1864. {
  1865. int buf_len, tlv_len, res, i = 0;
  1866. struct phyerr_tlv *tlv;
  1867. const void *tlv_buf;
  1868. const struct phyerr_fft_report *fftr;
  1869. size_t fftr_len;
  1870. buf_len = __le32_to_cpu(phyerr->buf_len);
  1871. while (i < buf_len) {
  1872. if (i + sizeof(*tlv) > buf_len) {
  1873. ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
  1874. i);
  1875. return;
  1876. }
  1877. tlv = (struct phyerr_tlv *)&phyerr->buf[i];
  1878. tlv_len = __le16_to_cpu(tlv->len);
  1879. tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
  1880. if (i + sizeof(*tlv) + tlv_len > buf_len) {
  1881. ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
  1882. i);
  1883. return;
  1884. }
  1885. switch (tlv->tag) {
  1886. case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
  1887. if (sizeof(*fftr) > tlv_len) {
  1888. ath10k_warn(ar, "failed to parse fft report at byte %d\n",
  1889. i);
  1890. return;
  1891. }
  1892. fftr_len = tlv_len - sizeof(*fftr);
  1893. fftr = tlv_buf;
  1894. res = ath10k_spectral_process_fft(ar, phyerr,
  1895. fftr, fftr_len,
  1896. tsf);
  1897. if (res < 0) {
  1898. ath10k_warn(ar, "failed to process fft report: %d\n",
  1899. res);
  1900. return;
  1901. }
  1902. break;
  1903. }
  1904. i += sizeof(*tlv) + tlv_len;
  1905. }
  1906. }
/* Handle a WMI_PHYERR_EVENTID message: a combined event carrying
 * ev->num_phyerrs individual PHY error records back-to-back, each a
 * fixed header followed by buf_len payload bytes.  Radar records are
 * routed to DFS, spectral records to the spectral scan parser, and
 * "false radar ext" records to both.
 */
static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
{
	const struct wmi_phyerr_event *ev;
	const struct wmi_phyerr *phyerr;
	u32 count, i, buf_len, phy_err_code;
	u64 tsf;
	int left_len = skb->len;

	ATH10K_DFS_STAT_INC(ar, phy_errors);

	/* Check if combined event available */
	if (left_len < sizeof(*ev)) {
		ath10k_warn(ar, "wmi phyerr combined event wrong len\n");
		return;
	}

	left_len -= sizeof(*ev);

	/* Check number of included events */
	ev = (const struct wmi_phyerr_event *)skb->data;
	count = __le32_to_cpu(ev->num_phyerrs);

	/* assemble the 64-bit TSF from the two 32-bit halves in the header */
	tsf = __le32_to_cpu(ev->tsf_u32);
	tsf <<= 32;
	tsf |= __le32_to_cpu(ev->tsf_l32);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi event phyerr count %d tsf64 0x%llX\n",
		   count, tsf);

	phyerr = ev->phyerrs;
	for (i = 0; i < count; i++) {
		/* Check if we can read event header */
		if (left_len < sizeof(*phyerr)) {
			ath10k_warn(ar, "single event (%d) wrong head len\n",
				    i);
			return;
		}

		left_len -= sizeof(*phyerr);

		buf_len = __le32_to_cpu(phyerr->buf_len);
		phy_err_code = phyerr->phy_err_code;

		/* the record's payload must also fit in what remains */
		if (left_len < buf_len) {
			ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
			return;
		}

		left_len -= buf_len;

		switch (phy_err_code) {
		case PHY_ERROR_RADAR:
			ath10k_wmi_event_dfs(ar, phyerr, tsf);
			break;
		case PHY_ERROR_SPECTRAL_SCAN:
			ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
			break;
		case PHY_ERROR_FALSE_RADAR_EXT:
			/* may contain both radar and spectral TLVs */
			ath10k_wmi_event_dfs(ar, phyerr, tsf);
			ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
			break;
		default:
			break;
		}

		/* advance to the next variable-length record */
		phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len;
	}
}
/* Stub handler: the roam event is only acknowledged in the debug log. */
static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
}

/* Stub handler: the profile match event is only acknowledged in the
 * debug log.
 */
static void ath10k_wmi_event_profile_match(struct ath10k *ar,
					   struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
}
  1972. static void ath10k_wmi_event_debug_print(struct ath10k *ar,
  1973. struct sk_buff *skb)
  1974. {
  1975. char buf[101], c;
  1976. int i;
  1977. for (i = 0; i < sizeof(buf) - 1; i++) {
  1978. if (i >= skb->len)
  1979. break;
  1980. c = skb->data[i];
  1981. if (c == '\0')
  1982. break;
  1983. if (isascii(c) && isprint(c))
  1984. buf[i] = c;
  1985. else
  1986. buf[i] = '.';
  1987. }
  1988. if (i == sizeof(buf) - 1)
  1989. ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
  1990. /* for some reason the debug prints end with \n, remove that */
  1991. if (skb->data[i - 1] == '\n')
  1992. i--;
  1993. /* the last byte is always reserved for the null character */
  1994. buf[i] = '\0';
  1995. ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
  1996. }
/* The handlers below are stubs: each WMI event is only acknowledged in
 * the debug log and its payload is otherwise ignored.
 */
static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
}

static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
}

static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
						    struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
}

static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
						    struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
}

static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
}

static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
}

static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
}

static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
}

static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
					   struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
}

static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
						struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
}

static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
}

static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
}

static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
}

static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
						       struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
}

static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
}

static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
}

static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
}
/* Allocate one DMA-coherent host memory chunk requested by the firmware
 * during service-ready handling, and record it in ar->wmi.mem_chunks.
 *
 * Returns 0 on success, -EINVAL for a zero-sized request, -ENOMEM on
 * allocation failure.
 *
 * NOTE(review): num_units * round_up(unit_len, 4) is computed in u32
 * with no overflow check; values come from firmware — presumably bounded
 * elsewhere, confirm.
 * NOTE(review): idx is not checked against the mem_chunks array capacity
 * here; the caller's WMI_MAX_MEM_REQS check appears to be the only
 * guard — verify the array sizes match.
 */
static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
				     u32 num_units, u32 unit_len)
{
	dma_addr_t paddr;
	u32 pool_size;
	int idx = ar->wmi.num_mem_chunks;

	/* each unit is padded to a 4-byte boundary */
	pool_size = num_units * round_up(unit_len, 4);

	if (!pool_size)
		return -EINVAL;

	ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
							   pool_size,
							   &paddr,
							   GFP_ATOMIC);
	if (!ar->wmi.mem_chunks[idx].vaddr) {
		ath10k_warn(ar, "failed to allocate memory chunk\n");
		return -ENOMEM;
	}

	memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);

	ar->wmi.mem_chunks[idx].paddr = paddr;
	ar->wmi.mem_chunks[idx].len = pool_size;
	ar->wmi.mem_chunks[idx].req_id = req_id;
	ar->wmi.num_mem_chunks++;

	return 0;
}
/* Parse a main-firmware SERVICE_READY event into @arg.  Pulls the fixed
 * header off @skb and captures pointers into the trailing mem_reqs
 * array (the pointers reference skb data and are not dereferenced
 * here).  Returns 0 on success, -EPROTO if the skb is too short for the
 * header or the advertised mem_reqs array.
 */
static int ath10k_wmi_main_pull_svc_rdy_ev(struct sk_buff *skb,
					   struct wmi_svc_rdy_ev_arg *arg)
{
	struct wmi_service_ready_event *ev;
	size_t i, n;

	if (skb->len < sizeof(*ev))
		return -EPROTO;

	ev = (void *)skb->data;
	skb_pull(skb, sizeof(*ev));
	arg->min_tx_power = ev->hw_min_tx_power;
	arg->max_tx_power = ev->hw_max_tx_power;
	arg->ht_cap = ev->ht_cap_info;
	arg->vht_cap = ev->vht_cap_info;
	arg->sw_ver0 = ev->sw_version;
	arg->sw_ver1 = ev->sw_version_1;
	arg->phy_capab = ev->phy_capability;
	arg->num_rf_chains = ev->num_rf_chains;
	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
	arg->num_mem_reqs = ev->num_mem_reqs;
	arg->service_map = ev->wmi_service_bitmap;
	arg->service_map_len = sizeof(ev->wmi_service_bitmap);

	/* capture at most ARRAY_SIZE(arg->mem_reqs) request pointers */
	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
		  ARRAY_SIZE(arg->mem_reqs));
	for (i = 0; i < n; i++)
		arg->mem_reqs[i] = &ev->mem_reqs[i];

	/* the advertised mem_reqs array must fit in the remaining data;
	 * the pointers stored above are only used after this check passes
	 */
	if (skb->len <
	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
		return -EPROTO;

	return 0;
}
  2135. static int ath10k_wmi_10x_pull_svc_rdy_ev(struct sk_buff *skb,
  2136. struct wmi_svc_rdy_ev_arg *arg)
  2137. {
  2138. struct wmi_10x_service_ready_event *ev;
  2139. int i, n;
  2140. if (skb->len < sizeof(*ev))
  2141. return -EPROTO;
  2142. ev = (void *)skb->data;
  2143. skb_pull(skb, sizeof(*ev));
  2144. arg->min_tx_power = ev->hw_min_tx_power;
  2145. arg->max_tx_power = ev->hw_max_tx_power;
  2146. arg->ht_cap = ev->ht_cap_info;
  2147. arg->vht_cap = ev->vht_cap_info;
  2148. arg->sw_ver0 = ev->sw_version;
  2149. arg->phy_capab = ev->phy_capability;
  2150. arg->num_rf_chains = ev->num_rf_chains;
  2151. arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
  2152. arg->num_mem_reqs = ev->num_mem_reqs;
  2153. arg->service_map = ev->wmi_service_bitmap;
  2154. arg->service_map_len = sizeof(ev->wmi_service_bitmap);
  2155. n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
  2156. ARRAY_SIZE(arg->mem_reqs));
  2157. for (i = 0; i < n; i++)
  2158. arg->mem_reqs[i] = &ev->mem_reqs[i];
  2159. if (skb->len <
  2160. __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
  2161. return -EPROTO;
  2162. return 0;
  2163. }
/* Handle the SERVICE_READY event: parse capabilities (firmware-branch
 * specific layout), populate the service bitmap and cached hardware
 * parameters, allocate all firmware-requested host memory chunks, and
 * finally signal ar->wmi.service_ready.
 */
static void ath10k_wmi_event_service_ready(struct ath10k *ar,
					   struct sk_buff *skb)
{
	struct wmi_svc_rdy_ev_arg arg = {};
	u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
	int ret;

	memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));

	/* event layout and service-bitmap encoding differ per fw branch */
	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
		ret = ath10k_wmi_10x_pull_svc_rdy_ev(skb, &arg);
		wmi_10x_svc_map(arg.service_map, ar->wmi.svc_map,
				arg.service_map_len);
	} else {
		ret = ath10k_wmi_main_pull_svc_rdy_ev(skb, &arg);
		wmi_main_svc_map(arg.service_map, ar->wmi.svc_map,
				 arg.service_map_len);
	}

	if (ret) {
		ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
		return;
	}

	ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
	ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
	ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
	ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
	/* sw_ver0 packs major.minor, sw_ver1 packs release.build */
	ar->fw_version_major =
		(__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
	ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
	ar->fw_version_release =
		(__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
	ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
	ar->phy_capability = __le32_to_cpu(arg.phy_capab);
	ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
	ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd);

	ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
			arg.service_map, arg.service_map_len);

	/* only manually set fw features when not using FW IE format */
	if (ar->fw_api == 1 && ar->fw_version_build > 636)
		set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);

	if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
		ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
			    ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
		ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
	}

	ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
	ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1;

	/* don't overwrite a version string set by an earlier event */
	if (strlen(ar->hw->wiphy->fw_version) == 0) {
		snprintf(ar->hw->wiphy->fw_version,
			 sizeof(ar->hw->wiphy->fw_version),
			 "%u.%u.%u.%u",
			 ar->fw_version_major,
			 ar->fw_version_minor,
			 ar->fw_version_release,
			 ar->fw_version_build);
	}

	num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
	if (num_mem_reqs > WMI_MAX_MEM_REQS) {
		ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
			    num_mem_reqs);
		return;
	}

	/* NOTE(review): TARGET_10X_* constants are used below regardless
	 * of firmware branch — presumably only 10.x firmware issues mem
	 * requests; confirm.
	 */
	for (i = 0; i < num_mem_reqs; ++i) {
		req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
		num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
		unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
		num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);

		if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
			/* number of units to allocate is number of
			 * peers, 1 extra for self peer on target */
			/* this needs to be tied, host and target
			 * can get out of sync */
			num_units = TARGET_10X_NUM_PEERS + 1;
		else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
			num_units = TARGET_10X_NUM_VDEVS + 1;

		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
			   req_id,
			   __le32_to_cpu(arg.mem_reqs[i]->num_units),
			   num_unit_info,
			   unit_size,
			   num_units);

		ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
						unit_size);
		if (ret)
			return;
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
		   __le32_to_cpu(arg.min_tx_power),
		   __le32_to_cpu(arg.max_tx_power),
		   __le32_to_cpu(arg.ht_cap),
		   __le32_to_cpu(arg.vht_cap),
		   __le32_to_cpu(arg.sw_ver0),
		   __le32_to_cpu(arg.sw_ver1),
		   __le32_to_cpu(arg.phy_capab),
		   __le32_to_cpu(arg.num_rf_chains),
		   __le32_to_cpu(arg.eeprom_rd),
		   __le32_to_cpu(arg.num_mem_reqs));

	complete(&ar->wmi.service_ready);
}
  2263. static int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
  2264. {
  2265. struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
  2266. if (WARN_ON(skb->len < sizeof(*ev)))
  2267. return -EINVAL;
  2268. ether_addr_copy(ar->mac_addr, ev->mac_addr.addr);
  2269. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2270. "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n",
  2271. __le32_to_cpu(ev->sw_version),
  2272. __le32_to_cpu(ev->abi_version),
  2273. ev->mac_addr.addr,
  2274. __le32_to_cpu(ev->status), skb->len, sizeof(*ev));
  2275. complete(&ar->wmi.unified_ready);
  2276. return 0;
  2277. }
/* Dispatch one received WMI message for main (non-10.x) firmware.
 * Strips the WMI command header, traces the event, routes it to the
 * matching handler, and frees the skb — except for MGMT_RX, whose
 * handler takes ownership of the skb.
 */
static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	/* bail out (without freeing) if the skb is shorter than the header */
	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	switch (id) {
	case WMI_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_PDEV_FTM_INTG_EVENTID:
		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath10k_wmi_event_gtk_offload_status(ar, skb);
		break;
	case WMI_GTK_REKEY_FAIL_EVENTID:
		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
		break;
	case WMI_TX_DELBA_COMPLETE_EVENTID:
		ath10k_wmi_event_delba_complete(ar, skb);
		break;
	case WMI_TX_ADDBA_COMPLETE_EVENTID:
		ath10k_wmi_event_addba_complete(ar, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
		break;
	case WMI_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		break;
	case WMI_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	default:
		ath10k_warn(ar, "Unknown eventid: %d\n", id);
		break;
	}

	dev_kfree_skb(skb);
}
/* Dispatch one received WMI message for 10.x firmware.  Like the main
 * variant but with 10.x event IDs and a testmode (UTF) bypass: testmode
 * may consume events before normal processing.  The skb is freed here
 * except for MGMT_RX, whose handler takes ownership.
 */
static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_10x_event_id id;
	bool consumed;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	/* bail out (without freeing) if the skb is shorter than the header */
	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	consumed = ath10k_tm_event_wmi(ar, id, skb);

	/* Ready event must be handled normally also in UTF mode so that we
	 * know the UTF firmware has booted, others we are just bypass WMI
	 * events to testmode.
	 */
	if (consumed && id != WMI_10X_READY_EVENTID) {
		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi testmode consumed 0x%x\n", id);
		goto out;
	}

	switch (id) {
	case WMI_10X_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_10X_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_10X_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_10X_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_10X_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_10X_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_10X_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_10X_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_10X_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_10X_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_10X_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_10X_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_10X_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_10X_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_10X_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_10X_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_10X_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_10X_INST_RSSI_STATS_EVENTID:
		ath10k_wmi_event_inst_rssi_stats(ar, skb);
		break;
	case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
		ath10k_wmi_event_vdev_standby_req(ar, skb);
		break;
	case WMI_10X_VDEV_RESUME_REQ_EVENTID:
		ath10k_wmi_event_vdev_resume_req(ar, skb);
		break;
	case WMI_10X_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		break;
	case WMI_10X_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	case WMI_10X_PDEV_UTF_EVENTID:
		/* ignore utf events */
		break;
	default:
		ath10k_warn(ar, "Unknown eventid: %d\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
/* Dispatch one received WMI message for 10.2 firmware.  Mirrors the
 * 10.x dispatcher (no testmode bypass here) and logs 10.2-only events
 * that are not yet implemented.  The skb is freed here except for
 * MGMT_RX, whose handler takes ownership.
 */
static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_10_2_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	/* bail out (without freeing) if the skb is shorter than the header */
	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	switch (id) {
	case WMI_10_2_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_10_2_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_10_2_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_10_2_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_10_2_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_10_2_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_10_2_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_10_2_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_10_2_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_10_2_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_10_2_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_10_2_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_10_2_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_10_2_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_10_2_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_10_2_INST_RSSI_STATS_EVENTID:
		ath10k_wmi_event_inst_rssi_stats(ar, skb);
		break;
	case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
		ath10k_wmi_event_vdev_standby_req(ar, skb);
		break;
	case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
		ath10k_wmi_event_vdev_resume_req(ar, skb);
		break;
	case WMI_10_2_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		break;
	case WMI_10_2_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	case WMI_10_2_RTT_KEEPALIVE_EVENTID:
	case WMI_10_2_GPIO_INPUT_EVENTID:
	case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
	case WMI_10_2_GENERIC_BUFFER_EVENTID:
	case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
	case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
	case WMI_10_2_WDS_PEER_EVENTID:
		/* known 10.2 events with no handler yet */
		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "received event id %d not implemented\n", id);
		break;
	default:
		ath10k_warn(ar, "Unknown eventid: %d\n", id);
		break;
	}

	dev_kfree_skb(skb);
}
  2615. static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
  2616. {
  2617. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  2618. if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
  2619. ath10k_wmi_10_2_process_rx(ar, skb);
  2620. else
  2621. ath10k_wmi_10x_process_rx(ar, skb);
  2622. } else {
  2623. ath10k_wmi_main_process_rx(ar, skb);
  2624. }
  2625. }
  2626. int ath10k_wmi_connect(struct ath10k *ar)
  2627. {
  2628. int status;
  2629. struct ath10k_htc_svc_conn_req conn_req;
  2630. struct ath10k_htc_svc_conn_resp conn_resp;
  2631. memset(&conn_req, 0, sizeof(conn_req));
  2632. memset(&conn_resp, 0, sizeof(conn_resp));
  2633. /* these fields are the same for all service endpoints */
  2634. conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
  2635. conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
  2636. conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
  2637. /* connect to control service */
  2638. conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
  2639. status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
  2640. if (status) {
  2641. ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
  2642. status);
  2643. return status;
  2644. }
  2645. ar->wmi.eid = conn_resp.eid;
  2646. return 0;
  2647. }
  2648. static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
  2649. u16 rd2g, u16 rd5g, u16 ctl2g,
  2650. u16 ctl5g)
  2651. {
  2652. struct wmi_pdev_set_regdomain_cmd *cmd;
  2653. struct sk_buff *skb;
  2654. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  2655. if (!skb)
  2656. return -ENOMEM;
  2657. cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
  2658. cmd->reg_domain = __cpu_to_le32(rd);
  2659. cmd->reg_domain_2G = __cpu_to_le32(rd2g);
  2660. cmd->reg_domain_5G = __cpu_to_le32(rd5g);
  2661. cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
  2662. cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
  2663. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2664. "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
  2665. rd, rd2g, rd5g, ctl2g, ctl5g);
  2666. return ath10k_wmi_cmd_send(ar, skb,
  2667. ar->wmi.cmd->pdev_set_regdomain_cmdid);
  2668. }
  2669. static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
  2670. u16 rd2g, u16 rd5g,
  2671. u16 ctl2g, u16 ctl5g,
  2672. enum wmi_dfs_region dfs_reg)
  2673. {
  2674. struct wmi_pdev_set_regdomain_cmd_10x *cmd;
  2675. struct sk_buff *skb;
  2676. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  2677. if (!skb)
  2678. return -ENOMEM;
  2679. cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
  2680. cmd->reg_domain = __cpu_to_le32(rd);
  2681. cmd->reg_domain_2G = __cpu_to_le32(rd2g);
  2682. cmd->reg_domain_5G = __cpu_to_le32(rd5g);
  2683. cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
  2684. cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
  2685. cmd->dfs_domain = __cpu_to_le32(dfs_reg);
  2686. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2687. "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
  2688. rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
  2689. return ath10k_wmi_cmd_send(ar, skb,
  2690. ar->wmi.cmd->pdev_set_regdomain_cmdid);
  2691. }
  2692. int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
  2693. u16 rd5g, u16 ctl2g, u16 ctl5g,
  2694. enum wmi_dfs_region dfs_reg)
  2695. {
  2696. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
  2697. return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
  2698. ctl2g, ctl5g, dfs_reg);
  2699. else
  2700. return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
  2701. ctl2g, ctl5g);
  2702. }
  2703. int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
  2704. {
  2705. struct wmi_pdev_suspend_cmd *cmd;
  2706. struct sk_buff *skb;
  2707. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  2708. if (!skb)
  2709. return -ENOMEM;
  2710. cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
  2711. cmd->suspend_opt = __cpu_to_le32(suspend_opt);
  2712. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
  2713. }
  2714. int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
  2715. {
  2716. struct sk_buff *skb;
  2717. skb = ath10k_wmi_alloc_skb(ar, 0);
  2718. if (skb == NULL)
  2719. return -ENOMEM;
  2720. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
  2721. }
  2722. int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
  2723. {
  2724. struct wmi_pdev_set_param_cmd *cmd;
  2725. struct sk_buff *skb;
  2726. if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
  2727. ath10k_warn(ar, "pdev param %d not supported by firmware\n",
  2728. id);
  2729. return -EOPNOTSUPP;
  2730. }
  2731. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  2732. if (!skb)
  2733. return -ENOMEM;
  2734. cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
  2735. cmd->param_id = __cpu_to_le32(id);
  2736. cmd->param_value = __cpu_to_le32(value);
  2737. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
  2738. id, value);
  2739. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
  2740. }
  2741. static void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
  2742. struct wmi_host_mem_chunks *chunks)
  2743. {
  2744. struct host_memory_chunk *chunk;
  2745. int i;
  2746. chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
  2747. for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
  2748. chunk = &chunks->items[i];
  2749. chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
  2750. chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
  2751. chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
  2752. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2753. "wmi chunk %d len %d requested, addr 0x%llx\n",
  2754. i,
  2755. ar->wmi.mem_chunks[i].len,
  2756. (unsigned long long)ar->wmi.mem_chunks[i].paddr);
  2757. }
  2758. }
/* Build and send WMI_INIT_CMDID for "main" (pre-10.x) firmware.
 *
 * The resource config is filled with the compile-time TARGET_* tuning
 * defaults, then the host memory chunk descriptors are appended after
 * the config and the whole blob is pushed to the target.
 * Returns 0 on success or a negative errno.
 */
static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
{
	struct wmi_init_cmd *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config config = {};
	u32 len, val;

	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
	config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
	config.num_offload_reorder_bufs =
		__cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
	config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);
	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
	config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
	config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
	val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
	config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
	config.gtk_offload_max_vdev =
		__cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);

	/* the command is followed by a variable number of memory chunk
	 * descriptors, one per chunk the firmware asked the host for
	 */
	len = sizeof(*cmd) +
	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);

	buf = ath10k_wmi_alloc_skb(ar, len);
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd *)buf->data;
	memcpy(&cmd->resource_config, &config, sizeof(config));
	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
	return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
/* Build and send WMI_INIT_CMDID for 10.x firmware.
 *
 * Same shape as the "main" variant, but uses the 10.x resource config
 * layout and the TARGET_10X_* tuning defaults (no offload peer /
 * reorder buffer / GTK offload fields in this ABI).
 * Returns 0 on success or a negative errno.
 */
static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
{
	struct wmi_init_cmd_10x *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config_10x config = {};
	u32 len, val;

	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);

	/* memory chunk descriptors follow the fixed-size command */
	len = sizeof(*cmd) +
	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);

	buf = ath10k_wmi_alloc_skb(ar, len);
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd_10x *)buf->data;
	memcpy(&cmd->resource_config, &config, sizeof(config));
	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
	return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
/* Build and send WMI_INIT_CMDID for 10.2 firmware.
 *
 * The 10.2 resource config embeds the 10.x config as its "common"
 * member, so the same TARGET_10X_* defaults are used and copied into
 * cmd->resource_config.common.  Returns 0 or a negative errno.
 */
static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
{
	struct wmi_init_cmd_10_2 *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config_10x config = {};
	u32 len, val;

	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);

	/* memory chunk descriptors follow the fixed-size command */
	len = sizeof(*cmd) +
	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);

	buf = ath10k_wmi_alloc_skb(ar, len);
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd_10_2 *)buf->data;
	/* 10.2 wraps the 10.x config; only the common part is filled here */
	memcpy(&cmd->resource_config.common, &config, sizeof(config));
	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
	return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
  2914. int ath10k_wmi_cmd_init(struct ath10k *ar)
  2915. {
  2916. int ret;
  2917. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  2918. if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
  2919. ret = ath10k_wmi_10_2_cmd_init(ar);
  2920. else
  2921. ret = ath10k_wmi_10x_cmd_init(ar);
  2922. } else {
  2923. ret = ath10k_wmi_main_cmd_init(ar);
  2924. }
  2925. return ret;
  2926. }
  2927. static int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
  2928. {
  2929. if (arg->ie_len && !arg->ie)
  2930. return -EINVAL;
  2931. if (arg->n_channels && !arg->channels)
  2932. return -EINVAL;
  2933. if (arg->n_ssids && !arg->ssids)
  2934. return -EINVAL;
  2935. if (arg->n_bssids && !arg->bssids)
  2936. return -EINVAL;
  2937. if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
  2938. return -EINVAL;
  2939. if (arg->n_channels > ARRAY_SIZE(arg->channels))
  2940. return -EINVAL;
  2941. if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
  2942. return -EINVAL;
  2943. if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
  2944. return -EINVAL;
  2945. return 0;
  2946. }
  2947. static size_t
  2948. ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
  2949. {
  2950. int len = 0;
  2951. if (arg->ie_len) {
  2952. len += sizeof(struct wmi_ie_data);
  2953. len += roundup(arg->ie_len, 4);
  2954. }
  2955. if (arg->n_channels) {
  2956. len += sizeof(struct wmi_chan_list);
  2957. len += sizeof(__le32) * arg->n_channels;
  2958. }
  2959. if (arg->n_ssids) {
  2960. len += sizeof(struct wmi_ssid_list);
  2961. len += sizeof(struct wmi_ssid) * arg->n_ssids;
  2962. }
  2963. if (arg->n_bssids) {
  2964. len += sizeof(struct wmi_bssid_list);
  2965. len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
  2966. }
  2967. return len;
  2968. }
/* Fill the portion of the start scan command shared between the main
 * and 10.x firmware ABIs from the caller-provided arg.
 */
static void
ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
				 const struct wmi_start_scan_arg *arg)
{
	u32 scan_id;
	u32 scan_req_id;

	/* host-originated scan/requestor ids are tagged with well-known
	 * prefixes so they can be told apart from firmware-generated ones
	 */
	scan_id  = WMI_HOST_SCAN_REQ_ID_PREFIX;
	scan_id |= arg->scan_id;

	scan_req_id  = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
	scan_req_id |= arg->scan_req_id;

	cmn->scan_id = __cpu_to_le32(scan_id);
	cmn->scan_req_id = __cpu_to_le32(scan_req_id);
	cmn->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmn->scan_priority = __cpu_to_le32(arg->scan_priority);
	cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
	cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
	cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
	cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time);
	cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time);
	cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
	cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
	cmn->idle_time = __cpu_to_le32(arg->idle_time);
	cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time);
	cmn->probe_delay = __cpu_to_le32(arg->probe_delay);
	cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
}
/* Append the optional TLV sections (channel list, SSID list, BSSID
 * list, extra IEs) after the common scan command header.  The layout
 * and sizes written here must stay in sync with
 * ath10k_wmi_start_scan_tlvs_len(); ptr is advanced past each section
 * as it is emitted.
 */
static void
ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
			       const struct wmi_start_scan_arg *arg)
{
	struct wmi_ie_data *ie;
	struct wmi_chan_list *channels;
	struct wmi_ssid_list *ssids;
	struct wmi_bssid_list *bssids;
	void *ptr = tlvs->tlvs;
	int i;

	if (arg->n_channels) {
		channels = ptr;
		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
		channels->num_chan = __cpu_to_le32(arg->n_channels);

		for (i = 0; i < arg->n_channels; i++)
			channels->channel_list[i].freq =
				__cpu_to_le16(arg->channels[i]);

		ptr += sizeof(*channels);
		ptr += sizeof(__le32) * arg->n_channels;
	}

	if (arg->n_ssids) {
		ssids = ptr;
		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);

		for (i = 0; i < arg->n_ssids; i++) {
			ssids->ssids[i].ssid_len =
				__cpu_to_le32(arg->ssids[i].len);
			memcpy(&ssids->ssids[i].ssid,
			       arg->ssids[i].ssid,
			       arg->ssids[i].len);
		}

		ptr += sizeof(*ssids);
		ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
	}

	if (arg->n_bssids) {
		bssids = ptr;
		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);

		for (i = 0; i < arg->n_bssids; i++)
			memcpy(&bssids->bssid_list[i],
			       arg->bssids[i].bssid,
			       ETH_ALEN);

		ptr += sizeof(*bssids);
		ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
	}

	if (arg->ie_len) {
		ie = ptr;
		ie->tag = __cpu_to_le32(WMI_IE_TAG);
		ie->ie_len = __cpu_to_le32(arg->ie_len);
		memcpy(ie->ie_data, arg->ie, arg->ie_len);

		ptr += sizeof(*ie);
		/* IE payload is padded to a 4-byte boundary on the wire */
		ptr += roundup(arg->ie_len, 4);
	}
}
  3049. int ath10k_wmi_start_scan(struct ath10k *ar,
  3050. const struct wmi_start_scan_arg *arg)
  3051. {
  3052. struct sk_buff *skb;
  3053. size_t len;
  3054. int ret;
  3055. ret = ath10k_wmi_start_scan_verify(arg);
  3056. if (ret)
  3057. return ret;
  3058. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
  3059. len = sizeof(struct wmi_10x_start_scan_cmd) +
  3060. ath10k_wmi_start_scan_tlvs_len(arg);
  3061. else
  3062. len = sizeof(struct wmi_start_scan_cmd) +
  3063. ath10k_wmi_start_scan_tlvs_len(arg);
  3064. skb = ath10k_wmi_alloc_skb(ar, len);
  3065. if (!skb)
  3066. return -ENOMEM;
  3067. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  3068. struct wmi_10x_start_scan_cmd *cmd;
  3069. cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
  3070. ath10k_wmi_put_start_scan_common(&cmd->common, arg);
  3071. ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
  3072. } else {
  3073. struct wmi_start_scan_cmd *cmd;
  3074. cmd = (struct wmi_start_scan_cmd *)skb->data;
  3075. cmd->burst_duration_ms = __cpu_to_le32(0);
  3076. ath10k_wmi_put_start_scan_common(&cmd->common, arg);
  3077. ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
  3078. }
  3079. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
  3080. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
  3081. }
/* Populate a wmi_start_scan_arg with the driver's default scan
 * parameters.  Callers tweak individual fields afterwards as needed.
 * Note scan_ctrl_flags is OR-ed, not overwritten, so flags the caller
 * sets beforehand survive.  The ar argument is currently unused.
 */
void ath10k_wmi_start_scan_init(struct ath10k *ar,
				struct wmi_start_scan_arg *arg)
{
	/* setup commonly used values */
	arg->scan_req_id = 1;
	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
	arg->dwell_time_active = 50;
	arg->dwell_time_passive = 150;
	arg->min_rest_time = 50;
	arg->max_rest_time = 500;
	arg->repeat_probe_time = 0;
	arg->probe_spacing_time = 0;
	arg->idle_time = 0;
	arg->max_scan_time = 20000;
	arg->probe_delay = 5;
	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
		| WMI_SCAN_EVENT_COMPLETED
		| WMI_SCAN_EVENT_BSS_CHANNEL
		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
		| WMI_SCAN_EVENT_DEQUEUED;
	arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
	/* default to the broadcast (wildcard) BSSID */
	arg->n_bssids = 1;
	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
}
  3107. int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
  3108. {
  3109. struct wmi_stop_scan_cmd *cmd;
  3110. struct sk_buff *skb;
  3111. u32 scan_id;
  3112. u32 req_id;
  3113. if (arg->req_id > 0xFFF)
  3114. return -EINVAL;
  3115. if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
  3116. return -EINVAL;
  3117. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3118. if (!skb)
  3119. return -ENOMEM;
  3120. scan_id = arg->u.scan_id;
  3121. scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
  3122. req_id = arg->req_id;
  3123. req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
  3124. cmd = (struct wmi_stop_scan_cmd *)skb->data;
  3125. cmd->req_type = __cpu_to_le32(arg->req_type);
  3126. cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
  3127. cmd->scan_id = __cpu_to_le32(scan_id);
  3128. cmd->scan_req_id = __cpu_to_le32(req_id);
  3129. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3130. "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
  3131. arg->req_id, arg->req_type, arg->u.scan_id);
  3132. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
  3133. }
  3134. int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
  3135. enum wmi_vdev_type type,
  3136. enum wmi_vdev_subtype subtype,
  3137. const u8 macaddr[ETH_ALEN])
  3138. {
  3139. struct wmi_vdev_create_cmd *cmd;
  3140. struct sk_buff *skb;
  3141. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3142. if (!skb)
  3143. return -ENOMEM;
  3144. cmd = (struct wmi_vdev_create_cmd *)skb->data;
  3145. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3146. cmd->vdev_type = __cpu_to_le32(type);
  3147. cmd->vdev_subtype = __cpu_to_le32(subtype);
  3148. ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
  3149. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3150. "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
  3151. vdev_id, type, subtype, macaddr);
  3152. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
  3153. }
  3154. int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
  3155. {
  3156. struct wmi_vdev_delete_cmd *cmd;
  3157. struct sk_buff *skb;
  3158. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3159. if (!skb)
  3160. return -ENOMEM;
  3161. cmd = (struct wmi_vdev_delete_cmd *)skb->data;
  3162. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3163. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3164. "WMI vdev delete id %d\n", vdev_id);
  3165. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
  3166. }
/* Shared worker for WMI vdev start and restart requests.  Both
 * commands use the same wmi_vdev_start_request_cmd payload and differ
 * only in cmd_id.  Returns 0 on success or a negative errno.
 */
static int
ath10k_wmi_vdev_start_restart(struct ath10k *ar,
			      const struct wmi_vdev_start_request_arg *arg,
			      u32 cmd_id)
{
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	const char *cmdname;
	u32 flags = 0;

	if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
	    cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
		return -EINVAL;
	/* inconsistent SSID arguments indicate a caller bug, so WARN */
	if (WARN_ON(arg->ssid && arg->ssid_len == 0))
		return -EINVAL;
	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
		return -EINVAL;
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)
		cmdname = "start";
	else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
		cmdname = "restart";
	else
		return -EINVAL; /* should not happen, we already check cmd_id */

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	if (arg->hidden_ssid)
		flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		flags |= WMI_VDEV_START_PMF_ENABLED;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
	cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
	cmd->flags = __cpu_to_le32(flags);
	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);

	/* SSID is optional; only copied when provided */
	if (arg->ssid) {
		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
	}

	ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
		   cmdname, arg->vdev_id,
		   flags, arg->channel.freq, arg->channel.mode,
		   cmd->chan.flags, arg->channel.max_power);

	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
  3218. int ath10k_wmi_vdev_start(struct ath10k *ar,
  3219. const struct wmi_vdev_start_request_arg *arg)
  3220. {
  3221. u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;
  3222. return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
  3223. }
  3224. int ath10k_wmi_vdev_restart(struct ath10k *ar,
  3225. const struct wmi_vdev_start_request_arg *arg)
  3226. {
  3227. u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;
  3228. return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
  3229. }
  3230. int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
  3231. {
  3232. struct wmi_vdev_stop_cmd *cmd;
  3233. struct sk_buff *skb;
  3234. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3235. if (!skb)
  3236. return -ENOMEM;
  3237. cmd = (struct wmi_vdev_stop_cmd *)skb->data;
  3238. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3239. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
  3240. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
  3241. }
  3242. int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
  3243. {
  3244. struct wmi_vdev_up_cmd *cmd;
  3245. struct sk_buff *skb;
  3246. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3247. if (!skb)
  3248. return -ENOMEM;
  3249. cmd = (struct wmi_vdev_up_cmd *)skb->data;
  3250. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3251. cmd->vdev_assoc_id = __cpu_to_le32(aid);
  3252. ether_addr_copy(cmd->vdev_bssid.addr, bssid);
  3253. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3254. "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
  3255. vdev_id, aid, bssid);
  3256. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
  3257. }
  3258. int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
  3259. {
  3260. struct wmi_vdev_down_cmd *cmd;
  3261. struct sk_buff *skb;
  3262. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3263. if (!skb)
  3264. return -ENOMEM;
  3265. cmd = (struct wmi_vdev_down_cmd *)skb->data;
  3266. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3267. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3268. "wmi mgmt vdev down id 0x%x\n", vdev_id);
  3269. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
  3270. }
  3271. int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
  3272. u32 param_id, u32 param_value)
  3273. {
  3274. struct wmi_vdev_set_param_cmd *cmd;
  3275. struct sk_buff *skb;
  3276. if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
  3277. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3278. "vdev param %d not supported by firmware\n",
  3279. param_id);
  3280. return -EOPNOTSUPP;
  3281. }
  3282. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3283. if (!skb)
  3284. return -ENOMEM;
  3285. cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
  3286. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3287. cmd->param_id = __cpu_to_le32(param_id);
  3288. cmd->param_value = __cpu_to_le32(param_value);
  3289. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3290. "wmi vdev id 0x%x set param %d value %d\n",
  3291. vdev_id, param_id, param_value);
  3292. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
  3293. }
  3294. int ath10k_wmi_vdev_install_key(struct ath10k *ar,
  3295. const struct wmi_vdev_install_key_arg *arg)
  3296. {
  3297. struct wmi_vdev_install_key_cmd *cmd;
  3298. struct sk_buff *skb;
  3299. if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
  3300. return -EINVAL;
  3301. if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
  3302. return -EINVAL;
  3303. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
  3304. if (!skb)
  3305. return -ENOMEM;
  3306. cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
  3307. cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  3308. cmd->key_idx = __cpu_to_le32(arg->key_idx);
  3309. cmd->key_flags = __cpu_to_le32(arg->key_flags);
  3310. cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
  3311. cmd->key_len = __cpu_to_le32(arg->key_len);
  3312. cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
  3313. cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
  3314. if (arg->macaddr)
  3315. ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
  3316. if (arg->key_data)
  3317. memcpy(cmd->key_data, arg->key_data, arg->key_len);
  3318. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3319. "wmi vdev install key idx %d cipher %d len %d\n",
  3320. arg->key_idx, arg->key_cipher, arg->key_len);
  3321. return ath10k_wmi_cmd_send(ar, skb,
  3322. ar->wmi.cmd->vdev_install_key_cmdid);
  3323. }
/* Push a spectral scan configuration to the firmware.  All arg fields
 * are copied 1:1 (converted to little endian) into the WMI command;
 * no validation is done here.  Returns 0 or a negative errno.
 */
int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
				  const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct wmi_vdev_spectral_conf_cmd *cmd;
	struct sk_buff *skb;
	u32 cmdid;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->scan_count = __cpu_to_le32(arg->scan_count);
	cmd->scan_period = __cpu_to_le32(arg->scan_period);
	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);

	cmdid = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmdid);
}
  3356. int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
  3357. u32 enable)
  3358. {
  3359. struct wmi_vdev_spectral_enable_cmd *cmd;
  3360. struct sk_buff *skb;
  3361. u32 cmdid;
  3362. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3363. if (!skb)
  3364. return -ENOMEM;
  3365. cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
  3366. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3367. cmd->trigger_cmd = __cpu_to_le32(trigger);
  3368. cmd->enable_cmd = __cpu_to_le32(enable);
  3369. cmdid = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
  3370. return ath10k_wmi_cmd_send(ar, skb, cmdid);
  3371. }
  3372. int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
  3373. const u8 peer_addr[ETH_ALEN])
  3374. {
  3375. struct wmi_peer_create_cmd *cmd;
  3376. struct sk_buff *skb;
  3377. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3378. if (!skb)
  3379. return -ENOMEM;
  3380. cmd = (struct wmi_peer_create_cmd *)skb->data;
  3381. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3382. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  3383. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3384. "wmi peer create vdev_id %d peer_addr %pM\n",
  3385. vdev_id, peer_addr);
  3386. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
  3387. }
  3388. int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
  3389. const u8 peer_addr[ETH_ALEN])
  3390. {
  3391. struct wmi_peer_delete_cmd *cmd;
  3392. struct sk_buff *skb;
  3393. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3394. if (!skb)
  3395. return -ENOMEM;
  3396. cmd = (struct wmi_peer_delete_cmd *)skb->data;
  3397. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3398. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  3399. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3400. "wmi peer delete vdev_id %d peer_addr %pM\n",
  3401. vdev_id, peer_addr);
  3402. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
  3403. }
  3404. int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
  3405. const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
  3406. {
  3407. struct wmi_peer_flush_tids_cmd *cmd;
  3408. struct sk_buff *skb;
  3409. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3410. if (!skb)
  3411. return -ENOMEM;
  3412. cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
  3413. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3414. cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
  3415. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  3416. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3417. "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
  3418. vdev_id, peer_addr, tid_bitmap);
  3419. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
  3420. }
  3421. int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
  3422. const u8 *peer_addr, enum wmi_peer_param param_id,
  3423. u32 param_value)
  3424. {
  3425. struct wmi_peer_set_param_cmd *cmd;
  3426. struct sk_buff *skb;
  3427. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3428. if (!skb)
  3429. return -ENOMEM;
  3430. cmd = (struct wmi_peer_set_param_cmd *)skb->data;
  3431. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3432. cmd->param_id = __cpu_to_le32(param_id);
  3433. cmd->param_value = __cpu_to_le32(param_value);
  3434. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  3435. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3436. "wmi vdev %d peer 0x%pM set param %d value %d\n",
  3437. vdev_id, peer_addr, param_id, param_value);
  3438. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
  3439. }
  3440. int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
  3441. enum wmi_sta_ps_mode psmode)
  3442. {
  3443. struct wmi_sta_powersave_mode_cmd *cmd;
  3444. struct sk_buff *skb;
  3445. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3446. if (!skb)
  3447. return -ENOMEM;
  3448. cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
  3449. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3450. cmd->sta_ps_mode = __cpu_to_le32(psmode);
  3451. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3452. "wmi set powersave id 0x%x mode %d\n",
  3453. vdev_id, psmode);
  3454. return ath10k_wmi_cmd_send(ar, skb,
  3455. ar->wmi.cmd->sta_powersave_mode_cmdid);
  3456. }
  3457. int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
  3458. enum wmi_sta_powersave_param param_id,
  3459. u32 value)
  3460. {
  3461. struct wmi_sta_powersave_param_cmd *cmd;
  3462. struct sk_buff *skb;
  3463. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3464. if (!skb)
  3465. return -ENOMEM;
  3466. cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
  3467. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3468. cmd->param_id = __cpu_to_le32(param_id);
  3469. cmd->param_value = __cpu_to_le32(value);
  3470. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3471. "wmi sta ps param vdev_id 0x%x param %d value %d\n",
  3472. vdev_id, param_id, value);
  3473. return ath10k_wmi_cmd_send(ar, skb,
  3474. ar->wmi.cmd->sta_powersave_param_cmdid);
  3475. }
  3476. int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  3477. enum wmi_ap_ps_peer_param param_id, u32 value)
  3478. {
  3479. struct wmi_ap_ps_peer_cmd *cmd;
  3480. struct sk_buff *skb;
  3481. if (!mac)
  3482. return -EINVAL;
  3483. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3484. if (!skb)
  3485. return -ENOMEM;
  3486. cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
  3487. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3488. cmd->param_id = __cpu_to_le32(param_id);
  3489. cmd->param_value = __cpu_to_le32(value);
  3490. ether_addr_copy(cmd->peer_macaddr.addr, mac);
  3491. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3492. "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
  3493. vdev_id, param_id, value, mac);
  3494. return ath10k_wmi_cmd_send(ar, skb,
  3495. ar->wmi.cmd->ap_ps_peer_param_cmdid);
  3496. }
  3497. int ath10k_wmi_scan_chan_list(struct ath10k *ar,
  3498. const struct wmi_scan_chan_list_arg *arg)
  3499. {
  3500. struct wmi_scan_chan_list_cmd *cmd;
  3501. struct sk_buff *skb;
  3502. struct wmi_channel_arg *ch;
  3503. struct wmi_channel *ci;
  3504. int len;
  3505. int i;
  3506. len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
  3507. skb = ath10k_wmi_alloc_skb(ar, len);
  3508. if (!skb)
  3509. return -EINVAL;
  3510. cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
  3511. cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
  3512. for (i = 0; i < arg->n_channels; i++) {
  3513. ch = &arg->channels[i];
  3514. ci = &cmd->chan_info[i];
  3515. ath10k_wmi_put_wmi_channel(ci, ch);
  3516. }
  3517. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
  3518. }
  3519. static void
  3520. ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
  3521. const struct wmi_peer_assoc_complete_arg *arg)
  3522. {
  3523. struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
  3524. cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  3525. cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
  3526. cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
  3527. cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
  3528. cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
  3529. cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
  3530. cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
  3531. cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
  3532. cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
  3533. cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
  3534. cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
  3535. cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
  3536. cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);
  3537. ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
  3538. cmd->peer_legacy_rates.num_rates =
  3539. __cpu_to_le32(arg->peer_legacy_rates.num_rates);
  3540. memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
  3541. arg->peer_legacy_rates.num_rates);
  3542. cmd->peer_ht_rates.num_rates =
  3543. __cpu_to_le32(arg->peer_ht_rates.num_rates);
  3544. memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
  3545. arg->peer_ht_rates.num_rates);
  3546. cmd->peer_vht_rates.rx_max_rate =
  3547. __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
  3548. cmd->peer_vht_rates.rx_mcs_set =
  3549. __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
  3550. cmd->peer_vht_rates.tx_max_rate =
  3551. __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
  3552. cmd->peer_vht_rates.tx_mcs_set =
  3553. __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
  3554. }
  3555. static void
  3556. ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
  3557. const struct wmi_peer_assoc_complete_arg *arg)
  3558. {
  3559. struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
  3560. ath10k_wmi_peer_assoc_fill(ar, buf, arg);
  3561. memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
  3562. }
/* 10.1 firmware layout adds no fields beyond the common peer-assoc set. */
static void
ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
				const struct wmi_peer_assoc_complete_arg *arg)
{
	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
}
  3569. static void
  3570. ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
  3571. const struct wmi_peer_assoc_complete_arg *arg)
  3572. {
  3573. struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
  3574. int max_mcs, max_nss;
  3575. u32 info0;
  3576. /* TODO: Is using max values okay with firmware? */
  3577. max_mcs = 0xf;
  3578. max_nss = 0xf;
  3579. info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
  3580. SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
  3581. ath10k_wmi_peer_assoc_fill(ar, buf, arg);
  3582. cmd->info0 = __cpu_to_le32(info0);
  3583. }
  3584. int ath10k_wmi_peer_assoc(struct ath10k *ar,
  3585. const struct wmi_peer_assoc_complete_arg *arg)
  3586. {
  3587. struct sk_buff *skb;
  3588. int len;
  3589. if (arg->peer_mpdu_density > 16)
  3590. return -EINVAL;
  3591. if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
  3592. return -EINVAL;
  3593. if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
  3594. return -EINVAL;
  3595. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  3596. if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
  3597. len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
  3598. else
  3599. len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
  3600. } else {
  3601. len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
  3602. }
  3603. skb = ath10k_wmi_alloc_skb(ar, len);
  3604. if (!skb)
  3605. return -ENOMEM;
  3606. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  3607. if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
  3608. ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
  3609. else
  3610. ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
  3611. } else {
  3612. ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
  3613. }
  3614. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3615. "wmi peer assoc vdev %d addr %pM (%s)\n",
  3616. arg->vdev_id, arg->addr,
  3617. arg->peer_reassoc ? "reassociate" : "new");
  3618. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
  3619. }
/* This function assumes the beacon is already DMA mapped */
/* Submit a by-reference beacon to the firmware without waiting for
 * command credits.
 *
 * The beacon payload itself is not copied: the command carries the DMA
 * address stored in ATH10K_SKB_CB(beacon)->paddr, so the mapping must
 * remain valid until the firmware consumes it.
 *
 * Returns 0 on success or a negative errno; on send failure the command
 * skb is freed here (the beacon skb is left untouched).
 */
int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
{
	struct wmi_bcn_tx_ref_cmd *cmd;
	struct sk_buff *skb;
	struct sk_buff *beacon = arvif->beacon;
	struct ath10k *ar = arvif->ar;
	struct ieee80211_hdr *hdr;
	int ret;
	u16 fc;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	/* Firmware wants the 802.11 frame control of the referenced beacon. */
	hdr = (struct ieee80211_hdr *)beacon->data;
	fc = le16_to_cpu(hdr->frame_control);

	cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
	cmd->data_len = __cpu_to_le32(beacon->len);
	cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr);
	cmd->msdu_id = 0;
	cmd->frame_control = __cpu_to_le32(fc);
	cmd->flags = 0;
	cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);

	/* Propagate per-beacon hints recorded in the skb control block. */
	if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);

	if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);

	if (ret)
		dev_kfree_skb(skb);

	return ret;
}
  3653. static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
  3654. const struct wmi_wmm_params_arg *arg)
  3655. {
  3656. params->cwmin = __cpu_to_le32(arg->cwmin);
  3657. params->cwmax = __cpu_to_le32(arg->cwmax);
  3658. params->aifs = __cpu_to_le32(arg->aifs);
  3659. params->txop = __cpu_to_le32(arg->txop);
  3660. params->acm = __cpu_to_le32(arg->acm);
  3661. params->no_ack = __cpu_to_le32(arg->no_ack);
  3662. }
  3663. int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
  3664. const struct wmi_pdev_set_wmm_params_arg *arg)
  3665. {
  3666. struct wmi_pdev_set_wmm_params *cmd;
  3667. struct sk_buff *skb;
  3668. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3669. if (!skb)
  3670. return -ENOMEM;
  3671. cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
  3672. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
  3673. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
  3674. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
  3675. ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
  3676. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
  3677. return ath10k_wmi_cmd_send(ar, skb,
  3678. ar->wmi.cmd->pdev_set_wmm_params_cmdid);
  3679. }
  3680. int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
  3681. {
  3682. struct wmi_request_stats_cmd *cmd;
  3683. struct sk_buff *skb;
  3684. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3685. if (!skb)
  3686. return -ENOMEM;
  3687. cmd = (struct wmi_request_stats_cmd *)skb->data;
  3688. cmd->stats_id = __cpu_to_le32(stats_id);
  3689. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
  3690. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
  3691. }
  3692. int ath10k_wmi_force_fw_hang(struct ath10k *ar,
  3693. enum wmi_force_fw_hang_type type, u32 delay_ms)
  3694. {
  3695. struct wmi_force_fw_hang_cmd *cmd;
  3696. struct sk_buff *skb;
  3697. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3698. if (!skb)
  3699. return -ENOMEM;
  3700. cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
  3701. cmd->type = __cpu_to_le32(type);
  3702. cmd->delay_ms = __cpu_to_le32(delay_ms);
  3703. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
  3704. type, delay_ms);
  3705. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
  3706. }
  3707. int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
  3708. {
  3709. struct wmi_dbglog_cfg_cmd *cmd;
  3710. struct sk_buff *skb;
  3711. u32 cfg;
  3712. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3713. if (!skb)
  3714. return -ENOMEM;
  3715. cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
  3716. if (module_enable) {
  3717. cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE,
  3718. ATH10K_DBGLOG_CFG_LOG_LVL);
  3719. } else {
  3720. /* set back defaults, all modules with WARN level */
  3721. cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
  3722. ATH10K_DBGLOG_CFG_LOG_LVL);
  3723. module_enable = ~0;
  3724. }
  3725. cmd->module_enable = __cpu_to_le32(module_enable);
  3726. cmd->module_valid = __cpu_to_le32(~0);
  3727. cmd->config_enable = __cpu_to_le32(cfg);
  3728. cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
  3729. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3730. "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
  3731. __le32_to_cpu(cmd->module_enable),
  3732. __le32_to_cpu(cmd->module_valid),
  3733. __le32_to_cpu(cmd->config_enable),
  3734. __le32_to_cpu(cmd->config_valid));
  3735. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
  3736. }
  3737. int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
  3738. {
  3739. struct wmi_pdev_pktlog_enable_cmd *cmd;
  3740. struct sk_buff *skb;
  3741. skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
  3742. if (!skb)
  3743. return -ENOMEM;
  3744. ev_bitmap &= ATH10K_PKTLOG_ANY;
  3745. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3746. "wmi enable pktlog filter:%x\n", ev_bitmap);
  3747. cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
  3748. cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
  3749. return ath10k_wmi_cmd_send(ar, skb,
  3750. ar->wmi.cmd->pdev_pktlog_enable_cmdid);
  3751. }
  3752. int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
  3753. {
  3754. struct sk_buff *skb;
  3755. skb = ath10k_wmi_alloc_skb(ar, 0);
  3756. if (!skb)
  3757. return -ENOMEM;
  3758. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
  3759. return ath10k_wmi_cmd_send(ar, skb,
  3760. ar->wmi.cmd->pdev_pktlog_disable_cmdid);
  3761. }
  3762. int ath10k_wmi_attach(struct ath10k *ar)
  3763. {
  3764. if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
  3765. if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
  3766. ar->wmi.cmd = &wmi_10_2_cmd_map;
  3767. else
  3768. ar->wmi.cmd = &wmi_10x_cmd_map;
  3769. ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
  3770. ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
  3771. } else {
  3772. ar->wmi.cmd = &wmi_cmd_map;
  3773. ar->wmi.vdev_param = &wmi_vdev_param_map;
  3774. ar->wmi.pdev_param = &wmi_pdev_param_map;
  3775. }
  3776. init_completion(&ar->wmi.service_ready);
  3777. init_completion(&ar->wmi.unified_ready);
  3778. return 0;
  3779. }
  3780. void ath10k_wmi_detach(struct ath10k *ar)
  3781. {
  3782. int i;
  3783. /* free the host memory chunks requested by firmware */
  3784. for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
  3785. dma_free_coherent(ar->dev,
  3786. ar->wmi.mem_chunks[i].len,
  3787. ar->wmi.mem_chunks[i].vaddr,
  3788. ar->wmi.mem_chunks[i].paddr);
  3789. }
  3790. ar->wmi.num_mem_chunks = 0;
  3791. }