/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_
struct ath10k;
struct sk_buff;

/* Per-firmware WMI backend operations. Each supported WMI ABI variant
 * provides its own implementation of this table; ops left NULL are
 * reported as unsupported (-EOPNOTSUPP) by the inline wrappers below.
 */
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
	void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_echo_ev_arg *arg);
	int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_dfs_status_ev_arg *arg);
	int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_svc_avail_ev_arg *arg);

	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
						 u32 prob_req_oui);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
					    struct sk_buff *skb,
					    dma_addr_t paddr);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_radar_found)
			(struct ath10k *ar,
			 const struct ath10k_radar_found_info *arg);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)
					(struct ath10k *ar,
					 enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
	struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
							u32 param);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}
static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
		       size_t len)
{
	if (!ar->wmi.ops->map_svc_ext)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc_ext(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_svc_avail_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_avail)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_dfs_status_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_dfs_status_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
			dma_addr_t paddr)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_send_cmdid);
	if (ret)
		return ret;

	return 0;
}
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}
static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
			 const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	/* Guard against a missing op, matching the other wrappers. */
	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
{
	struct sk_buff *skb;
	u32 prob_req_oui;

	prob_req_oui = (((u32)mac_addr[0]) << 16) |
		       (((u32)mac_addr[1]) << 8) | mac_addr[2];

	if (!ar->wmi.ops->gen_scan_prob_req_oui)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->scan_prob_req_oui_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}
static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
			 char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
				    u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
							detect_level,
							detect_margin);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
			       enum wmi_host_platform_type type,
			       u32 fw_feature_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->ext_resource_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->ext_resource_config(ar, type,
					       fw_feature_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
	if (!ar->wmi.ops->get_vdev_subtype)
		return -EOPNOTSUPP;

	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
				      enum wmi_bss_survey_req_type type)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_pdev_bss_chan_info_req)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_echo)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_echo(ar, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_table_cmdid);
}

static inline int
ath10k_wmi_report_radar_found(struct ath10k *ar,
			      const struct ath10k_radar_found_info *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_radar_found)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_radar_found(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->radar_found_cmdid);
}

#endif /* _WMI_OPS_H_ */