wmi-ops.h

/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;
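
/*
 * Per-ABI operation table for the WMI layer. Each WMI revision spoken by
 * the firmware (including the TLV-based variant) supplies its own instance
 * of this structure. A NULL entry means the revision does not support that
 * operation; the inline wrappers below map a missing op to -EOPNOTSUPP.
 */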
struct wmi_ops {
        void (*rx)(struct ath10k *ar, struct sk_buff *skb);
        void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

        int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_scan_ev_arg *arg);
        int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_mgmt_rx_ev_arg *arg);
        int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_ch_info_ev_arg *arg);
        int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_vdev_start_ev_arg *arg);
        int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_peer_kick_ev_arg *arg);
        int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_swba_ev_arg *arg);
        int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_phyerr_hdr_arg *arg);
        int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
                        int left_len, struct wmi_phyerr_ev_arg *arg);
        int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_svc_rdy_ev_arg *arg);
        int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_rdy_ev_arg *arg);
        int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
                        struct ath10k_fw_stats *stats);
        int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_roam_ev_arg *arg);
        int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_wow_ev_arg *arg);
        int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_echo_ev_arg *arg);
        enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

        struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
        struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
        struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
                        u16 rd5g, u16 ctl2g, u16 ctl5g,
                        enum wmi_dfs_region dfs_reg);
        struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
                        u32 value);
        struct sk_buff *(*gen_init)(struct ath10k *ar);
        struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
                        const struct wmi_start_scan_arg *arg);
        struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
                        const struct wmi_stop_scan_arg *arg);
        struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
                        enum wmi_vdev_type type,
                        enum wmi_vdev_subtype subtype,
                        const u8 macaddr[ETH_ALEN]);
        struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
        struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
                        const struct wmi_vdev_start_request_arg *arg,
                        bool restart);
        struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
        struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
                        const u8 *bssid);
        struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
        struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
                        u32 param_id, u32 param_value);
        struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
                        const struct wmi_vdev_install_key_arg *arg);
        struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
                        const struct wmi_vdev_spectral_conf_arg *arg);
        struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
                        u32 trigger, u32 enable);
        struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
                        const struct wmi_wmm_params_all_arg *arg);
        struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
                        const u8 peer_addr[ETH_ALEN],
                        enum wmi_peer_type peer_type);
        struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
                        const u8 peer_addr[ETH_ALEN]);
        struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
                        const u8 peer_addr[ETH_ALEN],
                        u32 tid_bitmap);
        struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
                        const u8 *peer_addr,
                        enum wmi_peer_param param_id,
                        u32 param_value);
        struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
                        const struct wmi_peer_assoc_complete_arg *arg);
        struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
                        enum wmi_sta_ps_mode psmode);
        struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
                        enum wmi_sta_powersave_param param_id,
                        u32 value);
        struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
                        const u8 *mac,
                        enum wmi_ap_ps_peer_param param_id,
                        u32 value);
        struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
                        const struct wmi_scan_chan_list_arg *arg);
        struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
                        const void *bcn, size_t bcn_len,
                        u32 bcn_paddr, bool dtim_zero,
                        bool deliver_cab);
        struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
                        const struct wmi_wmm_params_all_arg *arg);
        struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
        struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
                        enum wmi_force_fw_hang_type type,
                        u32 delay_ms);
        struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
        struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
                        struct sk_buff *skb,
                        dma_addr_t paddr);
        struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
                        u32 log_level);
        struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
        struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
        struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
                        u32 period, u32 duration,
                        u32 next_offset,
                        u32 enabled);
        struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
        struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
                        const u8 *mac);
        struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
                        const u8 *mac, u32 tid, u32 buf_size);
        struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
                        const u8 *mac, u32 tid,
                        u32 status);
        struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
                        const u8 *mac, u32 tid, u32 initiator,
                        u32 reason);
        struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
                        u32 tim_ie_offset, struct sk_buff *bcn,
                        u32 prb_caps, u32 prb_erp,
                        void *prb_ies, size_t prb_ies_len);
        struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
                        struct sk_buff *bcn);
        struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
                        const u8 *p2p_ie);
        struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
                        const u8 peer_addr[ETH_ALEN],
                        const struct wmi_sta_uapsd_auto_trig_arg *args,
                        u32 num_ac);
        struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
                        const struct wmi_sta_keepalive_arg *arg);
        struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
        struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
                        enum wmi_wow_wakeup_event event,
                        u32 enable);
        struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
        struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
                        u32 pattern_id,
                        const u8 *pattern,
                        const u8 *mask,
                        int pattern_len,
                        int pattern_offset);
        struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
                        u32 pattern_id);
        struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
                        u32 vdev_id,
                        enum wmi_tdls_state state);
        struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
                        const struct wmi_tdls_peer_update_cmd_arg *arg,
                        const struct wmi_tdls_peer_capab_arg *cap,
                        const struct wmi_channel_arg *chan);
        struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
        struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
                        u32 param);
        void (*fw_stats_fill)(struct ath10k *ar,
                        struct ath10k_fw_stats *fw_stats,
                        char *buf);
        struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
                        u8 enable,
                        u32 detect_level,
                        u32 detect_margin);
        struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
                        enum wmi_host_platform_type type,
                        u32 fw_feature_bitmap);
        int (*get_vdev_subtype)(struct ath10k *ar,
                        enum wmi_vdev_subtype subtype);
        struct sk_buff *(*gen_pdev_bss_chan_info_req)
                        (struct ath10k *ar,
                        enum wmi_bss_survey_req_type type);
        struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
        struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
                        u32 param);
};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
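
/*
 * The inline wrappers below share a common shape: check that the active
 * wmi_ops instance implements the operation (return -EOPNOTSUPP if not),
 * let the gen_*() op build the command skb, then pass the skb to
 * ath10k_wmi_cmd_send() with the command id taken from ar->wmi.cmd. The
 * pull_*() wrappers instead parse a received event skb into a host-order
 * argument structure.
 *
 * Illustrative call from a driver path (a sketch, not part of this header;
 * the vdev type/subtype constants and the arvif/vif fields are assumed from
 * the rest of the driver):
 *
 *      ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, WMI_VDEV_TYPE_STA,
 *                                   WMI_VDEV_SUBTYPE_NONE, vif->addr);
 *      if (ret)
 *              return ret;
 */
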
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
        if (WARN_ON_ONCE(!ar->wmi.ops->rx))
                return -EOPNOTSUPP;
        ar->wmi.ops->rx(ar, skb);
        return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
                size_t len)
{
        if (!ar->wmi.ops->map_svc)
                return -EOPNOTSUPP;
        ar->wmi.ops->map_svc(in, out, len);
        return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_scan_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_scan)
                return -EOPNOTSUPP;
        return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_mgmt_rx_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_mgmt_rx)
                return -EOPNOTSUPP;
        return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_ch_info_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_ch_info)
                return -EOPNOTSUPP;
        return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_vdev_start_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_vdev_start)
                return -EOPNOTSUPP;
        return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_peer_kick_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_peer_kick)
                return -EOPNOTSUPP;
        return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_swba_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_swba)
                return -EOPNOTSUPP;
        return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_phyerr_hdr_arg *arg)
{
        if (!ar->wmi.ops->pull_phyerr_hdr)
                return -EOPNOTSUPP;
        return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
                int left_len, struct wmi_phyerr_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_phyerr)
                return -EOPNOTSUPP;
        return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_svc_rdy_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_svc_rdy)
                return -EOPNOTSUPP;
        return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_rdy_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_rdy)
                return -EOPNOTSUPP;
        return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
                struct ath10k_fw_stats *stats)
{
        if (!ar->wmi.ops->pull_fw_stats)
                return -EOPNOTSUPP;
        return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_roam_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_roam_ev)
                return -EOPNOTSUPP;
        return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_wow_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_wow_event)
                return -EOPNOTSUPP;
        return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_echo_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_echo_ev)
                return -EOPNOTSUPP;
        return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
        if (!ar->wmi.ops->get_txbf_conf_scheme)
                return WMI_TXBF_CONF_UNSUPPORTED;
        return ar->wmi.ops->get_txbf_conf_scheme(ar);
}
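
/*
 * Two management-TX hooks exist: gen_mgmt_tx_send() also takes a DMA
 * address, so the frame is handed to the firmware by reference, while
 * gen_mgmt_tx() only gets the skb and builds a self-contained command.
 * Which wrapper gets used depends on which hook the active WMI variant
 * provides (an inference from the signatures above; this header does not
 * spell it out).
 */
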
static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
                dma_addr_t paddr)
{
        struct sk_buff *skb;
        int ret;

        if (!ar->wmi.ops->gen_mgmt_tx_send)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        ret = ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->mgmt_tx_send_cmdid);
        if (ret)
                return ret;
        return 0;
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
        struct sk_buff *skb;
        int ret;

        if (!ar->wmi.ops->gen_mgmt_tx)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        ret = ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->mgmt_tx_cmdid);
        if (ret)
                return ret;

        /* FIXME There's no ACK event for Management Tx. This probably
         * shouldn't be called here either.
         */
        info->flags |= IEEE80211_TX_STAT_ACK;
        ieee80211_tx_status_irqsafe(ar->hw, msdu);
        return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
                u16 ctl2g, u16 ctl5g,
                enum wmi_dfs_region dfs_reg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_rd)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
                        dfs_reg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_suspend)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_resume)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_pdev_resume(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_param)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_init)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_init(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
                const struct wmi_start_scan_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_start_scan)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_start_scan(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_stop_scan)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_stop_scan(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
                enum wmi_vdev_type type,
                enum wmi_vdev_subtype subtype,
                const u8 macaddr[ETH_ALEN])
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_create)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_delete)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
                const struct wmi_vdev_start_request_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_start)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
                const struct wmi_vdev_start_request_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_start)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_stop)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_up)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_down)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
                u32 param_value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_set_param)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
                        param_value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
                const struct wmi_vdev_install_key_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_install_key)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
                const struct wmi_vdev_spectral_conf_arg *arg)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_vdev_spectral_conf)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
                u32 enable)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_vdev_spectral_enable)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
                        enable);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN],
                const struct wmi_sta_uapsd_auto_trig_arg *args,
                u32 num_ac)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_vdev_sta_uapsd)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
                        num_ac);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
                const struct wmi_wmm_params_all_arg *arg)
{
        struct sk_buff *skb;
        u32 cmd_id;

        /* Guard the op like the other wrappers do instead of dereferencing
         * a potentially NULL gen_vdev_wmm_conf.
         */
        if (!ar->wmi.ops->gen_vdev_wmm_conf)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN],
                enum wmi_peer_type peer_type)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_create)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN])
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_delete)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_flush)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
                enum wmi_peer_param param_id, u32 param_value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_set_param)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
                        param_value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
                enum wmi_sta_ps_mode psmode)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_set_psmode)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
                enum wmi_sta_powersave_param param_id, u32 value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_set_sta_ps)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                enum wmi_ap_ps_peer_param param_id, u32 value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_set_ap_ps)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
                const struct wmi_scan_chan_list_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_scan_chan_list)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
                const struct wmi_peer_assoc_complete_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_assoc)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
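
/*
 * Beacon submission differs from the other command wrappers: it uses the
 * no-wait send path and frees the command skb itself if
 * ath10k_wmi_cmd_send_nowait() fails, since the error path below implies
 * the no-wait send does not consume the skb on error.
 */
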
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
                const void *bcn, size_t bcn_len,
                u32 bcn_paddr, bool dtim_zero,
                bool deliver_cab)
{
        struct sk_buff *skb;
        int ret;

        if (!ar->wmi.ops->gen_beacon_dma)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
                        dtim_zero, deliver_cab);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        ret = ath10k_wmi_cmd_send_nowait(ar, skb,
                        ar->wmi.cmd->pdev_send_bcn_cmdid);
        if (ret) {
                dev_kfree_skb(skb);
                return ret;
        }
        return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
                const struct wmi_wmm_params_all_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_wmm)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_request_stats)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
                enum wmi_force_fw_hang_type type, u32 delay_ms)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_force_fw_hang)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_dbglog_cfg)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pktlog_enable)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pktlog_disable)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_pktlog_disable(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
                u32 next_offset, u32 enabled)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
                        next_offset, enabled);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_get_temperature)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_addba_clear_resp)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                u32 tid, u32 buf_size)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_addba_send)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                u32 tid, u32 status)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_addba_set_resp)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                u32 tid, u32 initiator, u32 reason)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_delba_send)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
                        reason);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
                struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
                void *prb_ies, size_t prb_ies_len)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_bcn_tmpl)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
                        prb_caps, prb_erp, prb_ies,
                        prb_ies_len);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_prb_tmpl)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
                const struct wmi_sta_keepalive_arg *arg)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_sta_keepalive)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
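
/*
 * Wake-on-WLAN (WoW) helpers. These arm wakeup events and patterns and
 * signal host wakeup, and are typically driven from the driver's
 * suspend/resume handling.
 */
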
static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_enable)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_wow_enable(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        cmd_id = ar->wmi.cmd->wow_enable_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
                enum wmi_wow_wakeup_event event,
                u32 enable)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_add_wakeup_event)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
                const u8 *pattern, const u8 *mask,
                int pattern_len, int pattern_offset)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_add_pattern)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
                        pattern, mask, pattern_len,
                        pattern_offset);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_del_pattern)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
                enum wmi_tdls_state state)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_update_fw_tdls_state)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
                const struct wmi_tdls_peer_update_cmd_arg *arg,
                const struct wmi_tdls_peer_capab_arg *cap,
                const struct wmi_channel_arg *chan)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_tdls_peer_update)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_adaptive_qcs)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_get_tpc_config)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
                char *buf)
{
        if (!ar->wmi.ops->fw_stats_fill)
                return -EOPNOTSUPP;
        ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
        return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
                u32 detect_level, u32 detect_margin)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
                        detect_level,
                        detect_margin);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
                enum wmi_host_platform_type type,
                u32 fw_feature_bitmap)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->ext_resource_config)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->ext_resource_config(ar, type,
                        fw_feature_bitmap);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
        if (!ar->wmi.ops->get_vdev_subtype)
                return -EOPNOTSUPP;
        return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
                enum wmi_bss_survey_req_type type)
{
        struct ath10k_wmi *wmi = &ar->wmi;
        struct sk_buff *skb;

        if (!wmi->ops->gen_pdev_bss_chan_info_req)
                return -EOPNOTSUPP;
        skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
        struct ath10k_wmi *wmi = &ar->wmi;
        struct sk_buff *skb;

        if (!wmi->ops->gen_echo)
                return -EOPNOTSUPP;
        skb = wmi->ops->gen_echo(ar, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
                return -EOPNOTSUPP;
        skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->pdev_get_tpc_table_cmdid);
}

#endif