/* NOTE(review): web-extraction residue (file size + concatenated line
 * numbers) removed here; it was not part of the original header.
 */
  1. /*
  2. * Copyright (c) 2005-2011 Atheros Communications Inc.
  3. * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. */
  17. #ifndef _WMI_OPS_H_
  18. #define _WMI_OPS_H_
  19. struct ath10k;
  20. struct sk_buff;
/* Per-firmware-ABI WMI operation table.
 *
 * Each WMI ABI variant provides an instance of this table; the
 * ath10k_wmi_*() inline wrappers below dispatch through it and return
 * -EOPNOTSUPP when an op is left NULL.
 *
 * Conventions visible from the wrappers in this file:
 *  - pull_* ops parse a received event skb into a host arg structure and
 *    return 0 or a negative errno.
 *  - gen_* ops build a command sk_buff; the returned pointer may be an
 *    ERR_PTR() (callers check with IS_ERR()).
 */
struct wmi_ops {
	/* Dispatch one received WMI event skb. */
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	/* Map the firmware service bitmap (__le32 words) into host flags. */
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	/* Event parsers: 0 on success, negative errno on failure. */
	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	/* Parses one phyerr record out of a raw buffer (not an skb). */
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);

	/* ABI-specific TX beamforming configuration scheme. */
	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	/* Command builders: return an skb ready for ath10k_wmi_cmd_send()
	 * or an ERR_PTR() on failure.
	 */
	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	/* restart=true re-issues the start request as a restart command. */
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	/* Beacon is referenced by DMA address (bcn_paddr), not copied. */
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	/* Block-ack session management commands. */
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	/* Beacon/probe-response template offload. */
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	/* Wake-on-WLAN commands. */
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	/* TDLS state/peer management. */
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	/* Format firmware stats into a text buffer (debugfs helper). */
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	/* Translate generic vdev subtype to the ABI-specific value. */
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
};
  187. int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
  188. static inline int
  189. ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
  190. {
  191. if (WARN_ON_ONCE(!ar->wmi.ops->rx))
  192. return -EOPNOTSUPP;
  193. ar->wmi.ops->rx(ar, skb);
  194. return 0;
  195. }
  196. static inline int
  197. ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
  198. size_t len)
  199. {
  200. if (!ar->wmi.ops->map_svc)
  201. return -EOPNOTSUPP;
  202. ar->wmi.ops->map_svc(in, out, len);
  203. return 0;
  204. }
  205. static inline int
  206. ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
  207. struct wmi_scan_ev_arg *arg)
  208. {
  209. if (!ar->wmi.ops->pull_scan)
  210. return -EOPNOTSUPP;
  211. return ar->wmi.ops->pull_scan(ar, skb, arg);
  212. }
  213. static inline int
  214. ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
  215. struct wmi_mgmt_rx_ev_arg *arg)
  216. {
  217. if (!ar->wmi.ops->pull_mgmt_rx)
  218. return -EOPNOTSUPP;
  219. return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
  220. }
  221. static inline int
  222. ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
  223. struct wmi_ch_info_ev_arg *arg)
  224. {
  225. if (!ar->wmi.ops->pull_ch_info)
  226. return -EOPNOTSUPP;
  227. return ar->wmi.ops->pull_ch_info(ar, skb, arg);
  228. }
  229. static inline int
  230. ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
  231. struct wmi_vdev_start_ev_arg *arg)
  232. {
  233. if (!ar->wmi.ops->pull_vdev_start)
  234. return -EOPNOTSUPP;
  235. return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
  236. }
  237. static inline int
  238. ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
  239. struct wmi_peer_kick_ev_arg *arg)
  240. {
  241. if (!ar->wmi.ops->pull_peer_kick)
  242. return -EOPNOTSUPP;
  243. return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
  244. }
  245. static inline int
  246. ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
  247. struct wmi_swba_ev_arg *arg)
  248. {
  249. if (!ar->wmi.ops->pull_swba)
  250. return -EOPNOTSUPP;
  251. return ar->wmi.ops->pull_swba(ar, skb, arg);
  252. }
  253. static inline int
  254. ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
  255. struct wmi_phyerr_hdr_arg *arg)
  256. {
  257. if (!ar->wmi.ops->pull_phyerr_hdr)
  258. return -EOPNOTSUPP;
  259. return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
  260. }
  261. static inline int
  262. ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
  263. int left_len, struct wmi_phyerr_ev_arg *arg)
  264. {
  265. if (!ar->wmi.ops->pull_phyerr)
  266. return -EOPNOTSUPP;
  267. return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
  268. }
  269. static inline int
  270. ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
  271. struct wmi_svc_rdy_ev_arg *arg)
  272. {
  273. if (!ar->wmi.ops->pull_svc_rdy)
  274. return -EOPNOTSUPP;
  275. return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
  276. }
  277. static inline int
  278. ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
  279. struct wmi_rdy_ev_arg *arg)
  280. {
  281. if (!ar->wmi.ops->pull_rdy)
  282. return -EOPNOTSUPP;
  283. return ar->wmi.ops->pull_rdy(ar, skb, arg);
  284. }
  285. static inline int
  286. ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
  287. struct ath10k_fw_stats *stats)
  288. {
  289. if (!ar->wmi.ops->pull_fw_stats)
  290. return -EOPNOTSUPP;
  291. return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
  292. }
  293. static inline int
  294. ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
  295. struct wmi_roam_ev_arg *arg)
  296. {
  297. if (!ar->wmi.ops->pull_roam_ev)
  298. return -EOPNOTSUPP;
  299. return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
  300. }
  301. static inline int
  302. ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
  303. struct wmi_wow_ev_arg *arg)
  304. {
  305. if (!ar->wmi.ops->pull_wow_event)
  306. return -EOPNOTSUPP;
  307. return ar->wmi.ops->pull_wow_event(ar, skb, arg);
  308. }
  309. static inline enum wmi_txbf_conf
  310. ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
  311. {
  312. if (!ar->wmi.ops->get_txbf_conf_scheme)
  313. return WMI_TXBF_CONF_UNSUPPORTED;
  314. return ar->wmi.ops->get_txbf_conf_scheme(ar);
  315. }
  316. static inline int
  317. ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
  318. {
  319. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
  320. struct sk_buff *skb;
  321. int ret;
  322. if (!ar->wmi.ops->gen_mgmt_tx)
  323. return -EOPNOTSUPP;
  324. skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
  325. if (IS_ERR(skb))
  326. return PTR_ERR(skb);
  327. ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
  328. if (ret)
  329. return ret;
  330. /* FIXME There's no ACK event for Management Tx. This probably
  331. * shouldn't be called here either. */
  332. info->flags |= IEEE80211_TX_STAT_ACK;
  333. ieee80211_tx_status_irqsafe(ar->hw, msdu);
  334. return 0;
  335. }
  336. static inline int
  337. ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
  338. u16 ctl2g, u16 ctl5g,
  339. enum wmi_dfs_region dfs_reg)
  340. {
  341. struct sk_buff *skb;
  342. if (!ar->wmi.ops->gen_pdev_set_rd)
  343. return -EOPNOTSUPP;
  344. skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
  345. dfs_reg);
  346. if (IS_ERR(skb))
  347. return PTR_ERR(skb);
  348. return ath10k_wmi_cmd_send(ar, skb,
  349. ar->wmi.cmd->pdev_set_regdomain_cmdid);
  350. }
  351. static inline int
  352. ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
  353. {
  354. struct sk_buff *skb;
  355. if (!ar->wmi.ops->gen_pdev_suspend)
  356. return -EOPNOTSUPP;
  357. skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
  358. if (IS_ERR(skb))
  359. return PTR_ERR(skb);
  360. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
  361. }
  362. static inline int
  363. ath10k_wmi_pdev_resume_target(struct ath10k *ar)
  364. {
  365. struct sk_buff *skb;
  366. if (!ar->wmi.ops->gen_pdev_resume)
  367. return -EOPNOTSUPP;
  368. skb = ar->wmi.ops->gen_pdev_resume(ar);
  369. if (IS_ERR(skb))
  370. return PTR_ERR(skb);
  371. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
  372. }
  373. static inline int
  374. ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
  375. {
  376. struct sk_buff *skb;
  377. if (!ar->wmi.ops->gen_pdev_set_param)
  378. return -EOPNOTSUPP;
  379. skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
  380. if (IS_ERR(skb))
  381. return PTR_ERR(skb);
  382. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
  383. }
  384. static inline int
  385. ath10k_wmi_cmd_init(struct ath10k *ar)
  386. {
  387. struct sk_buff *skb;
  388. if (!ar->wmi.ops->gen_init)
  389. return -EOPNOTSUPP;
  390. skb = ar->wmi.ops->gen_init(ar);
  391. if (IS_ERR(skb))
  392. return PTR_ERR(skb);
  393. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
  394. }
  395. static inline int
  396. ath10k_wmi_start_scan(struct ath10k *ar,
  397. const struct wmi_start_scan_arg *arg)
  398. {
  399. struct sk_buff *skb;
  400. if (!ar->wmi.ops->gen_start_scan)
  401. return -EOPNOTSUPP;
  402. skb = ar->wmi.ops->gen_start_scan(ar, arg);
  403. if (IS_ERR(skb))
  404. return PTR_ERR(skb);
  405. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
  406. }
  407. static inline int
  408. ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
  409. {
  410. struct sk_buff *skb;
  411. if (!ar->wmi.ops->gen_stop_scan)
  412. return -EOPNOTSUPP;
  413. skb = ar->wmi.ops->gen_stop_scan(ar, arg);
  414. if (IS_ERR(skb))
  415. return PTR_ERR(skb);
  416. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
  417. }
  418. static inline int
  419. ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
  420. enum wmi_vdev_type type,
  421. enum wmi_vdev_subtype subtype,
  422. const u8 macaddr[ETH_ALEN])
  423. {
  424. struct sk_buff *skb;
  425. if (!ar->wmi.ops->gen_vdev_create)
  426. return -EOPNOTSUPP;
  427. skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
  428. if (IS_ERR(skb))
  429. return PTR_ERR(skb);
  430. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
  431. }
  432. static inline int
  433. ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
  434. {
  435. struct sk_buff *skb;
  436. if (!ar->wmi.ops->gen_vdev_delete)
  437. return -EOPNOTSUPP;
  438. skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
  439. if (IS_ERR(skb))
  440. return PTR_ERR(skb);
  441. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
  442. }
  443. static inline int
  444. ath10k_wmi_vdev_start(struct ath10k *ar,
  445. const struct wmi_vdev_start_request_arg *arg)
  446. {
  447. struct sk_buff *skb;
  448. if (!ar->wmi.ops->gen_vdev_start)
  449. return -EOPNOTSUPP;
  450. skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
  451. if (IS_ERR(skb))
  452. return PTR_ERR(skb);
  453. return ath10k_wmi_cmd_send(ar, skb,
  454. ar->wmi.cmd->vdev_start_request_cmdid);
  455. }
  456. static inline int
  457. ath10k_wmi_vdev_restart(struct ath10k *ar,
  458. const struct wmi_vdev_start_request_arg *arg)
  459. {
  460. struct sk_buff *skb;
  461. if (!ar->wmi.ops->gen_vdev_start)
  462. return -EOPNOTSUPP;
  463. skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
  464. if (IS_ERR(skb))
  465. return PTR_ERR(skb);
  466. return ath10k_wmi_cmd_send(ar, skb,
  467. ar->wmi.cmd->vdev_restart_request_cmdid);
  468. }
  469. static inline int
  470. ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
  471. {
  472. struct sk_buff *skb;
  473. if (!ar->wmi.ops->gen_vdev_stop)
  474. return -EOPNOTSUPP;
  475. skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
  476. if (IS_ERR(skb))
  477. return PTR_ERR(skb);
  478. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
  479. }
  480. static inline int
  481. ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
  482. {
  483. struct sk_buff *skb;
  484. if (!ar->wmi.ops->gen_vdev_up)
  485. return -EOPNOTSUPP;
  486. skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
  487. if (IS_ERR(skb))
  488. return PTR_ERR(skb);
  489. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
  490. }
  491. static inline int
  492. ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
  493. {
  494. struct sk_buff *skb;
  495. if (!ar->wmi.ops->gen_vdev_down)
  496. return -EOPNOTSUPP;
  497. skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
  498. if (IS_ERR(skb))
  499. return PTR_ERR(skb);
  500. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
  501. }
  502. static inline int
  503. ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
  504. u32 param_value)
  505. {
  506. struct sk_buff *skb;
  507. if (!ar->wmi.ops->gen_vdev_set_param)
  508. return -EOPNOTSUPP;
  509. skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
  510. param_value);
  511. if (IS_ERR(skb))
  512. return PTR_ERR(skb);
  513. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
  514. }
  515. static inline int
  516. ath10k_wmi_vdev_install_key(struct ath10k *ar,
  517. const struct wmi_vdev_install_key_arg *arg)
  518. {
  519. struct sk_buff *skb;
  520. if (!ar->wmi.ops->gen_vdev_install_key)
  521. return -EOPNOTSUPP;
  522. skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
  523. if (IS_ERR(skb))
  524. return PTR_ERR(skb);
  525. return ath10k_wmi_cmd_send(ar, skb,
  526. ar->wmi.cmd->vdev_install_key_cmdid);
  527. }
  528. static inline int
  529. ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
  530. const struct wmi_vdev_spectral_conf_arg *arg)
  531. {
  532. struct sk_buff *skb;
  533. u32 cmd_id;
  534. skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
  535. if (IS_ERR(skb))
  536. return PTR_ERR(skb);
  537. cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
  538. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  539. }
  540. static inline int
  541. ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
  542. u32 enable)
  543. {
  544. struct sk_buff *skb;
  545. u32 cmd_id;
  546. skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
  547. enable);
  548. if (IS_ERR(skb))
  549. return PTR_ERR(skb);
  550. cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
  551. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  552. }
  553. static inline int
  554. ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
  555. const u8 peer_addr[ETH_ALEN],
  556. const struct wmi_sta_uapsd_auto_trig_arg *args,
  557. u32 num_ac)
  558. {
  559. struct sk_buff *skb;
  560. u32 cmd_id;
  561. if (!ar->wmi.ops->gen_vdev_sta_uapsd)
  562. return -EOPNOTSUPP;
  563. skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
  564. num_ac);
  565. if (IS_ERR(skb))
  566. return PTR_ERR(skb);
  567. cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
  568. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  569. }
  570. static inline int
  571. ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
  572. const struct wmi_wmm_params_all_arg *arg)
  573. {
  574. struct sk_buff *skb;
  575. u32 cmd_id;
  576. skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
  577. if (IS_ERR(skb))
  578. return PTR_ERR(skb);
  579. cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
  580. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  581. }
  582. static inline int
  583. ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
  584. const u8 peer_addr[ETH_ALEN],
  585. enum wmi_peer_type peer_type)
  586. {
  587. struct sk_buff *skb;
  588. if (!ar->wmi.ops->gen_peer_create)
  589. return -EOPNOTSUPP;
  590. skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
  591. if (IS_ERR(skb))
  592. return PTR_ERR(skb);
  593. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
  594. }
  595. static inline int
  596. ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
  597. const u8 peer_addr[ETH_ALEN])
  598. {
  599. struct sk_buff *skb;
  600. if (!ar->wmi.ops->gen_peer_delete)
  601. return -EOPNOTSUPP;
  602. skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
  603. if (IS_ERR(skb))
  604. return PTR_ERR(skb);
  605. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
  606. }
  607. static inline int
  608. ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
  609. const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
  610. {
  611. struct sk_buff *skb;
  612. if (!ar->wmi.ops->gen_peer_flush)
  613. return -EOPNOTSUPP;
  614. skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
  615. if (IS_ERR(skb))
  616. return PTR_ERR(skb);
  617. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
  618. }
  619. static inline int
  620. ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
  621. enum wmi_peer_param param_id, u32 param_value)
  622. {
  623. struct sk_buff *skb;
  624. if (!ar->wmi.ops->gen_peer_set_param)
  625. return -EOPNOTSUPP;
  626. skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
  627. param_value);
  628. if (IS_ERR(skb))
  629. return PTR_ERR(skb);
  630. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
  631. }
  632. static inline int
  633. ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
  634. enum wmi_sta_ps_mode psmode)
  635. {
  636. struct sk_buff *skb;
  637. if (!ar->wmi.ops->gen_set_psmode)
  638. return -EOPNOTSUPP;
  639. skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
  640. if (IS_ERR(skb))
  641. return PTR_ERR(skb);
  642. return ath10k_wmi_cmd_send(ar, skb,
  643. ar->wmi.cmd->sta_powersave_mode_cmdid);
  644. }
  645. static inline int
  646. ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
  647. enum wmi_sta_powersave_param param_id, u32 value)
  648. {
  649. struct sk_buff *skb;
  650. if (!ar->wmi.ops->gen_set_sta_ps)
  651. return -EOPNOTSUPP;
  652. skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
  653. if (IS_ERR(skb))
  654. return PTR_ERR(skb);
  655. return ath10k_wmi_cmd_send(ar, skb,
  656. ar->wmi.cmd->sta_powersave_param_cmdid);
  657. }
  658. static inline int
  659. ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  660. enum wmi_ap_ps_peer_param param_id, u32 value)
  661. {
  662. struct sk_buff *skb;
  663. if (!ar->wmi.ops->gen_set_ap_ps)
  664. return -EOPNOTSUPP;
  665. skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
  666. if (IS_ERR(skb))
  667. return PTR_ERR(skb);
  668. return ath10k_wmi_cmd_send(ar, skb,
  669. ar->wmi.cmd->ap_ps_peer_param_cmdid);
  670. }
  671. static inline int
  672. ath10k_wmi_scan_chan_list(struct ath10k *ar,
  673. const struct wmi_scan_chan_list_arg *arg)
  674. {
  675. struct sk_buff *skb;
  676. if (!ar->wmi.ops->gen_scan_chan_list)
  677. return -EOPNOTSUPP;
  678. skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
  679. if (IS_ERR(skb))
  680. return PTR_ERR(skb);
  681. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
  682. }
  683. static inline int
  684. ath10k_wmi_peer_assoc(struct ath10k *ar,
  685. const struct wmi_peer_assoc_complete_arg *arg)
  686. {
  687. struct sk_buff *skb;
  688. if (!ar->wmi.ops->gen_peer_assoc)
  689. return -EOPNOTSUPP;
  690. skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
  691. if (IS_ERR(skb))
  692. return PTR_ERR(skb);
  693. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
  694. }
  695. static inline int
  696. ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
  697. const void *bcn, size_t bcn_len,
  698. u32 bcn_paddr, bool dtim_zero,
  699. bool deliver_cab)
  700. {
  701. struct sk_buff *skb;
  702. int ret;
  703. if (!ar->wmi.ops->gen_beacon_dma)
  704. return -EOPNOTSUPP;
  705. skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
  706. dtim_zero, deliver_cab);
  707. if (IS_ERR(skb))
  708. return PTR_ERR(skb);
  709. ret = ath10k_wmi_cmd_send_nowait(ar, skb,
  710. ar->wmi.cmd->pdev_send_bcn_cmdid);
  711. if (ret) {
  712. dev_kfree_skb(skb);
  713. return ret;
  714. }
  715. return 0;
  716. }
  717. static inline int
  718. ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
  719. const struct wmi_wmm_params_all_arg *arg)
  720. {
  721. struct sk_buff *skb;
  722. if (!ar->wmi.ops->gen_pdev_set_wmm)
  723. return -EOPNOTSUPP;
  724. skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
  725. if (IS_ERR(skb))
  726. return PTR_ERR(skb);
  727. return ath10k_wmi_cmd_send(ar, skb,
  728. ar->wmi.cmd->pdev_set_wmm_params_cmdid);
  729. }
  730. static inline int
  731. ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
  732. {
  733. struct sk_buff *skb;
  734. if (!ar->wmi.ops->gen_request_stats)
  735. return -EOPNOTSUPP;
  736. skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
  737. if (IS_ERR(skb))
  738. return PTR_ERR(skb);
  739. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
  740. }
  741. static inline int
  742. ath10k_wmi_force_fw_hang(struct ath10k *ar,
  743. enum wmi_force_fw_hang_type type, u32 delay_ms)
  744. {
  745. struct sk_buff *skb;
  746. if (!ar->wmi.ops->gen_force_fw_hang)
  747. return -EOPNOTSUPP;
  748. skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
  749. if (IS_ERR(skb))
  750. return PTR_ERR(skb);
  751. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
  752. }
  753. static inline int
  754. ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
  755. {
  756. struct sk_buff *skb;
  757. if (!ar->wmi.ops->gen_dbglog_cfg)
  758. return -EOPNOTSUPP;
  759. skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
  760. if (IS_ERR(skb))
  761. return PTR_ERR(skb);
  762. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
  763. }
  764. static inline int
  765. ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
  766. {
  767. struct sk_buff *skb;
  768. if (!ar->wmi.ops->gen_pktlog_enable)
  769. return -EOPNOTSUPP;
  770. skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
  771. if (IS_ERR(skb))
  772. return PTR_ERR(skb);
  773. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
  774. }
  775. static inline int
  776. ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
  777. {
  778. struct sk_buff *skb;
  779. if (!ar->wmi.ops->gen_pktlog_disable)
  780. return -EOPNOTSUPP;
  781. skb = ar->wmi.ops->gen_pktlog_disable(ar);
  782. if (IS_ERR(skb))
  783. return PTR_ERR(skb);
  784. return ath10k_wmi_cmd_send(ar, skb,
  785. ar->wmi.cmd->pdev_pktlog_disable_cmdid);
  786. }
  787. static inline int
  788. ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
  789. u32 next_offset, u32 enabled)
  790. {
  791. struct sk_buff *skb;
  792. if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
  793. return -EOPNOTSUPP;
  794. skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
  795. next_offset, enabled);
  796. if (IS_ERR(skb))
  797. return PTR_ERR(skb);
  798. return ath10k_wmi_cmd_send(ar, skb,
  799. ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
  800. }
  801. static inline int
  802. ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
  803. {
  804. struct sk_buff *skb;
  805. if (!ar->wmi.ops->gen_pdev_get_temperature)
  806. return -EOPNOTSUPP;
  807. skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
  808. if (IS_ERR(skb))
  809. return PTR_ERR(skb);
  810. return ath10k_wmi_cmd_send(ar, skb,
  811. ar->wmi.cmd->pdev_get_temperature_cmdid);
  812. }
  813. static inline int
  814. ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
  815. {
  816. struct sk_buff *skb;
  817. if (!ar->wmi.ops->gen_addba_clear_resp)
  818. return -EOPNOTSUPP;
  819. skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
  820. if (IS_ERR(skb))
  821. return PTR_ERR(skb);
  822. return ath10k_wmi_cmd_send(ar, skb,
  823. ar->wmi.cmd->addba_clear_resp_cmdid);
  824. }
  825. static inline int
  826. ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  827. u32 tid, u32 buf_size)
  828. {
  829. struct sk_buff *skb;
  830. if (!ar->wmi.ops->gen_addba_send)
  831. return -EOPNOTSUPP;
  832. skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
  833. if (IS_ERR(skb))
  834. return PTR_ERR(skb);
  835. return ath10k_wmi_cmd_send(ar, skb,
  836. ar->wmi.cmd->addba_send_cmdid);
  837. }
  838. static inline int
  839. ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  840. u32 tid, u32 status)
  841. {
  842. struct sk_buff *skb;
  843. if (!ar->wmi.ops->gen_addba_set_resp)
  844. return -EOPNOTSUPP;
  845. skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
  846. if (IS_ERR(skb))
  847. return PTR_ERR(skb);
  848. return ath10k_wmi_cmd_send(ar, skb,
  849. ar->wmi.cmd->addba_set_resp_cmdid);
  850. }
  851. static inline int
  852. ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  853. u32 tid, u32 initiator, u32 reason)
  854. {
  855. struct sk_buff *skb;
  856. if (!ar->wmi.ops->gen_delba_send)
  857. return -EOPNOTSUPP;
  858. skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
  859. reason);
  860. if (IS_ERR(skb))
  861. return PTR_ERR(skb);
  862. return ath10k_wmi_cmd_send(ar, skb,
  863. ar->wmi.cmd->delba_send_cmdid);
  864. }
  865. static inline int
  866. ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
  867. struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
  868. void *prb_ies, size_t prb_ies_len)
  869. {
  870. struct sk_buff *skb;
  871. if (!ar->wmi.ops->gen_bcn_tmpl)
  872. return -EOPNOTSUPP;
  873. skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
  874. prb_caps, prb_erp, prb_ies,
  875. prb_ies_len);
  876. if (IS_ERR(skb))
  877. return PTR_ERR(skb);
  878. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
  879. }
  880. static inline int
  881. ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
  882. {
  883. struct sk_buff *skb;
  884. if (!ar->wmi.ops->gen_prb_tmpl)
  885. return -EOPNOTSUPP;
  886. skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
  887. if (IS_ERR(skb))
  888. return PTR_ERR(skb);
  889. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
  890. }
  891. static inline int
  892. ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
  893. {
  894. struct sk_buff *skb;
  895. if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
  896. return -EOPNOTSUPP;
  897. skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
  898. if (IS_ERR(skb))
  899. return PTR_ERR(skb);
  900. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
  901. }
  902. static inline int
  903. ath10k_wmi_sta_keepalive(struct ath10k *ar,
  904. const struct wmi_sta_keepalive_arg *arg)
  905. {
  906. struct sk_buff *skb;
  907. u32 cmd_id;
  908. if (!ar->wmi.ops->gen_sta_keepalive)
  909. return -EOPNOTSUPP;
  910. skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
  911. if (IS_ERR(skb))
  912. return PTR_ERR(skb);
  913. cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
  914. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  915. }
  916. static inline int
  917. ath10k_wmi_wow_enable(struct ath10k *ar)
  918. {
  919. struct sk_buff *skb;
  920. u32 cmd_id;
  921. if (!ar->wmi.ops->gen_wow_enable)
  922. return -EOPNOTSUPP;
  923. skb = ar->wmi.ops->gen_wow_enable(ar);
  924. if (IS_ERR(skb))
  925. return PTR_ERR(skb);
  926. cmd_id = ar->wmi.cmd->wow_enable_cmdid;
  927. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  928. }
  929. static inline int
  930. ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
  931. enum wmi_wow_wakeup_event event,
  932. u32 enable)
  933. {
  934. struct sk_buff *skb;
  935. u32 cmd_id;
  936. if (!ar->wmi.ops->gen_wow_add_wakeup_event)
  937. return -EOPNOTSUPP;
  938. skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
  939. if (IS_ERR(skb))
  940. return PTR_ERR(skb);
  941. cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
  942. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  943. }
  944. static inline int
  945. ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
  946. {
  947. struct sk_buff *skb;
  948. u32 cmd_id;
  949. if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
  950. return -EOPNOTSUPP;
  951. skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
  952. if (IS_ERR(skb))
  953. return PTR_ERR(skb);
  954. cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
  955. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  956. }
  957. static inline int
  958. ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
  959. const u8 *pattern, const u8 *mask,
  960. int pattern_len, int pattern_offset)
  961. {
  962. struct sk_buff *skb;
  963. u32 cmd_id;
  964. if (!ar->wmi.ops->gen_wow_add_pattern)
  965. return -EOPNOTSUPP;
  966. skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
  967. pattern, mask, pattern_len,
  968. pattern_offset);
  969. if (IS_ERR(skb))
  970. return PTR_ERR(skb);
  971. cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
  972. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  973. }
  974. static inline int
  975. ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
  976. {
  977. struct sk_buff *skb;
  978. u32 cmd_id;
  979. if (!ar->wmi.ops->gen_wow_del_pattern)
  980. return -EOPNOTSUPP;
  981. skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
  982. if (IS_ERR(skb))
  983. return PTR_ERR(skb);
  984. cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
  985. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  986. }
  987. static inline int
  988. ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
  989. enum wmi_tdls_state state)
  990. {
  991. struct sk_buff *skb;
  992. if (!ar->wmi.ops->gen_update_fw_tdls_state)
  993. return -EOPNOTSUPP;
  994. skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
  995. if (IS_ERR(skb))
  996. return PTR_ERR(skb);
  997. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
  998. }
  999. static inline int
  1000. ath10k_wmi_tdls_peer_update(struct ath10k *ar,
  1001. const struct wmi_tdls_peer_update_cmd_arg *arg,
  1002. const struct wmi_tdls_peer_capab_arg *cap,
  1003. const struct wmi_channel_arg *chan)
  1004. {
  1005. struct sk_buff *skb;
  1006. if (!ar->wmi.ops->gen_tdls_peer_update)
  1007. return -EOPNOTSUPP;
  1008. skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
  1009. if (IS_ERR(skb))
  1010. return PTR_ERR(skb);
  1011. return ath10k_wmi_cmd_send(ar, skb,
  1012. ar->wmi.cmd->tdls_peer_update_cmdid);
  1013. }
  1014. static inline int
  1015. ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
  1016. {
  1017. struct sk_buff *skb;
  1018. if (!ar->wmi.ops->gen_adaptive_qcs)
  1019. return -EOPNOTSUPP;
  1020. skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
  1021. if (IS_ERR(skb))
  1022. return PTR_ERR(skb);
  1023. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
  1024. }
  1025. static inline int
  1026. ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
  1027. {
  1028. struct sk_buff *skb;
  1029. if (!ar->wmi.ops->gen_pdev_get_tpc_config)
  1030. return -EOPNOTSUPP;
  1031. skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
  1032. if (IS_ERR(skb))
  1033. return PTR_ERR(skb);
  1034. return ath10k_wmi_cmd_send(ar, skb,
  1035. ar->wmi.cmd->pdev_get_tpc_config_cmdid);
  1036. }
  1037. static inline int
  1038. ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
  1039. char *buf)
  1040. {
  1041. if (!ar->wmi.ops->fw_stats_fill)
  1042. return -EOPNOTSUPP;
  1043. ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
  1044. return 0;
  1045. }
  1046. static inline int
  1047. ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
  1048. u32 detect_level, u32 detect_margin)
  1049. {
  1050. struct sk_buff *skb;
  1051. if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
  1052. return -EOPNOTSUPP;
  1053. skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
  1054. detect_level,
  1055. detect_margin);
  1056. if (IS_ERR(skb))
  1057. return PTR_ERR(skb);
  1058. return ath10k_wmi_cmd_send(ar, skb,
  1059. ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
  1060. }
  1061. static inline int
  1062. ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
  1063. {
  1064. if (!ar->wmi.ops->get_vdev_subtype)
  1065. return -EOPNOTSUPP;
  1066. return ar->wmi.ops->get_vdev_subtype(ar, subtype);
  1067. }
  1068. #endif