
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;
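
/*
 * struct wmi_ops abstracts the WMI command/event format differences between
 * the firmware interfaces supported by ath10k; each firmware variant
 * registers its own set of these ops when the WMI layer is attached, and the
 * inline helpers below dispatch through them.
 */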
struct wmi_ops {
        void (*rx)(struct ath10k *ar, struct sk_buff *skb);
        void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

        int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
                         struct wmi_scan_ev_arg *arg);
        int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
                            struct wmi_mgmt_rx_ev_arg *arg);
        int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
                            struct wmi_ch_info_ev_arg *arg);
        int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
                               struct wmi_vdev_start_ev_arg *arg);
        int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
                              struct wmi_peer_kick_ev_arg *arg);
        int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
                         struct wmi_swba_ev_arg *arg);
        int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
                               struct wmi_phyerr_hdr_arg *arg);
        int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
                           int left_len, struct wmi_phyerr_ev_arg *arg);
        int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
                            struct wmi_svc_rdy_ev_arg *arg);
        int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_rdy_ev_arg *arg);
        int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
                             struct ath10k_fw_stats *stats);
        int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
                            struct wmi_roam_ev_arg *arg);
        int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
                              struct wmi_wow_ev_arg *arg);
        enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

        struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
        struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
        struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
                                           u16 rd5g, u16 ctl2g, u16 ctl5g,
                                           enum wmi_dfs_region dfs_reg);
        struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
                                              u32 value);
        struct sk_buff *(*gen_init)(struct ath10k *ar);
        struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
                                          const struct wmi_start_scan_arg *arg);
        struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
                                         const struct wmi_stop_scan_arg *arg);
        struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
                                           enum wmi_vdev_type type,
                                           enum wmi_vdev_subtype subtype,
                                           const u8 macaddr[ETH_ALEN]);
        struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
        struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
                                          const struct wmi_vdev_start_request_arg *arg,
                                          bool restart);
        struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
        struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
                                       const u8 *bssid);
        struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
        struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
                                              u32 param_id, u32 param_value);
        struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
                                                const struct wmi_vdev_install_key_arg *arg);
        struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
                                                  const struct wmi_vdev_spectral_conf_arg *arg);
        struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
                                                    u32 trigger, u32 enable);
        struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
                                             const struct wmi_wmm_params_all_arg *arg);
        struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
                                           const u8 peer_addr[ETH_ALEN],
                                           enum wmi_peer_type peer_type);
        struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
                                           const u8 peer_addr[ETH_ALEN]);
        struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
                                          const u8 peer_addr[ETH_ALEN],
                                          u32 tid_bitmap);
        struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
                                              const u8 *peer_addr,
                                              enum wmi_peer_param param_id,
                                              u32 param_value);
        struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
                                          const struct wmi_peer_assoc_complete_arg *arg);
        struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
                                          enum wmi_sta_ps_mode psmode);
        struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
                                          enum wmi_sta_powersave_param param_id,
                                          u32 value);
        struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
                                         const u8 *mac,
                                         enum wmi_ap_ps_peer_param param_id,
                                         u32 value);
        struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
                                              const struct wmi_scan_chan_list_arg *arg);
        struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
                                          const void *bcn, size_t bcn_len,
                                          u32 bcn_paddr, bool dtim_zero,
                                          bool deliver_cab);
        struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
                                            const struct wmi_wmm_params_all_arg *arg);
        struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
        struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
                                             enum wmi_force_fw_hang_type type,
                                             u32 delay_ms);
        struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
        struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
                                          u32 log_level);
        struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
        struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
        struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
                                                   u32 period, u32 duration,
                                                   u32 next_offset,
                                                   u32 enabled);
        struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
        struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
                                                const u8 *mac);
        struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
                                          const u8 *mac, u32 tid, u32 buf_size);
        struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
                                              const u8 *mac, u32 tid,
                                              u32 status);
        struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
                                          const u8 *mac, u32 tid, u32 initiator,
                                          u32 reason);
        struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
                                        u32 tim_ie_offset, struct sk_buff *bcn,
                                        u32 prb_caps, u32 prb_erp,
                                        void *prb_ies, size_t prb_ies_len);
        struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
                                        struct sk_buff *bcn);
        struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
                                             const u8 *p2p_ie);
        struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
                                              const u8 peer_addr[ETH_ALEN],
                                              const struct wmi_sta_uapsd_auto_trig_arg *args,
                                              u32 num_ac);
        struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
                                             const struct wmi_sta_keepalive_arg *arg);
        struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
        struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
                                                    enum wmi_wow_wakeup_event event,
                                                    u32 enable);
        struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
        struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
                                               u32 pattern_id,
                                               const u8 *pattern,
                                               const u8 *mask,
                                               int pattern_len,
                                               int pattern_offset);
        struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
                                               u32 pattern_id);
        struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
                                                    u32 vdev_id,
                                                    enum wmi_tdls_state state);
        struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
                                                const struct wmi_tdls_peer_update_cmd_arg *arg,
                                                const struct wmi_tdls_peer_capab_arg *cap,
                                                const struct wmi_channel_arg *chan);
        struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
        struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
                                                   u32 param);
        void (*fw_stats_fill)(struct ath10k *ar,
                              struct ath10k_fw_stats *fw_stats,
                              char *buf);
        struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
                                                        u8 enable,
                                                        u32 detect_level,
                                                        u32 detect_margin);
        struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
                                               enum wmi_host_platform_type type,
                                               u32 fw_feature_bitmap);
        int (*get_vdev_subtype)(struct ath10k *ar,
                                enum wmi_vdev_subtype subtype);
        struct sk_buff *(*gen_pdev_bss_chan_info_req)
                                        (struct ath10k *ar,
                                         enum wmi_bss_survey_req_type type);
};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
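
/*
 * Each helper below follows the same pattern: check that the firmware
 * interface bound to this device implements the requested op, let the gen_*
 * op build the command buffer, and hand that buffer to ath10k_wmi_cmd_send()
 * with the command id looked up in ar->wmi.cmd. A missing op is reported as
 * -EOPNOTSUPP so callers can treat optional firmware features uniformly.
 */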

static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
        if (WARN_ON_ONCE(!ar->wmi.ops->rx))
                return -EOPNOTSUPP;

        ar->wmi.ops->rx(ar, skb);
        return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
                   size_t len)
{
        if (!ar->wmi.ops->map_svc)
                return -EOPNOTSUPP;

        ar->wmi.ops->map_svc(in, out, len);
        return 0;
}
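
/* The pull_* helpers parse a received WMI event buffer into the matching
 * *_ev_arg structure so the rest of the driver does not need to know which
 * firmware interface produced the event.
 */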

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
                     struct wmi_scan_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_scan)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_mgmt_rx_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_mgmt_rx)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_ch_info_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_ch_info)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
                           struct wmi_vdev_start_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_vdev_start)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
                          struct wmi_peer_kick_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_peer_kick)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
                     struct wmi_swba_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_swba)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
                           struct wmi_phyerr_hdr_arg *arg)
{
        if (!ar->wmi.ops->pull_phyerr_hdr)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
                       int left_len, struct wmi_phyerr_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_phyerr)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_svc_rdy_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_svc_rdy)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
                    struct wmi_rdy_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_rdy)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
                         struct ath10k_fw_stats *stats)
{
        if (!ar->wmi.ops->pull_fw_stats)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_roam_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_roam_ev)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
                          struct wmi_wow_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_wow_event)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
        if (!ar->wmi.ops->get_txbf_conf_scheme)
                return WMI_TXBF_CONF_UNSUPPORTED;

        return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
        struct sk_buff *skb;
        int ret;

        if (!ar->wmi.ops->gen_mgmt_tx)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
        if (ret)
                return ret;

        /* FIXME There's no ACK event for Management Tx. This probably
         * shouldn't be called here either.
         */
        info->flags |= IEEE80211_TX_STAT_ACK;
        ieee80211_tx_status_irqsafe(ar->hw, msdu);

        return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
                              u16 ctl2g, u16 ctl5g,
                              enum wmi_dfs_region dfs_reg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_rd)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
                                           dfs_reg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_suspend)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_resume)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_resume(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_param)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}
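
/* Usage sketch for the helper above (illustrative only; assumes ar is a
 * started device and that the parameter id comes from the ar->wmi.pdev_param
 * table registered for the running firmware):
 *
 *      ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
 *      if (ret)
 *              ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
 */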

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_init)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_init(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
                      const struct wmi_start_scan_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_start_scan)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_start_scan(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_stop_scan)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_stop_scan(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
                       enum wmi_vdev_type type,
                       enum wmi_vdev_subtype subtype,
                       const u8 macaddr[ETH_ALEN])
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_create)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_delete)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
                      const struct wmi_vdev_start_request_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_start)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
                        const struct wmi_vdev_start_request_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_start)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_stop)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_up)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_down)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
                          u32 param_value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_set_param)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
                                              param_value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
                            const struct wmi_vdev_install_key_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_install_key)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
                              const struct wmi_vdev_spectral_conf_arg *arg)
{
        struct sk_buff *skb;
        u32 cmd_id;

        skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
                                u32 enable)
{
        struct sk_buff *skb;
        u32 cmd_id;

        skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
                                                    enable);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
                          const u8 peer_addr[ETH_ALEN],
                          const struct wmi_sta_uapsd_auto_trig_arg *args,
                          u32 num_ac)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_vdev_sta_uapsd)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
                                              num_ac);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
                         const struct wmi_wmm_params_all_arg *arg)
{
        struct sk_buff *skb;
        u32 cmd_id;

        skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
                       const u8 peer_addr[ETH_ALEN],
                       enum wmi_peer_type peer_type)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_create)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
                       const u8 peer_addr[ETH_ALEN])
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_delete)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
                      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_flush)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
                          enum wmi_peer_param param_id, u32 param_value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_set_param)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
                                              param_value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
                      enum wmi_sta_ps_mode psmode)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_set_psmode)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
                            enum wmi_sta_powersave_param param_id, u32 value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_set_sta_ps)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                           enum wmi_ap_ps_peer_param param_id, u32 value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_set_ap_ps)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
                          const struct wmi_scan_chan_list_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_scan_chan_list)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
                      const struct wmi_peer_assoc_complete_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_assoc)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
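
/* Note: unlike the other helpers, the beacon send helper below uses the
 * nowait send variant (ath10k_wmi_cmd_send_nowait(), declared alongside the
 * WMI core code) and frees the command buffer itself when the send fails.
 */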

static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
                                  const void *bcn, size_t bcn_len,
                                  u32 bcn_paddr, bool dtim_zero,
                                  bool deliver_cab)
{
        struct sk_buff *skb;
        int ret;

        if (!ar->wmi.ops->gen_beacon_dma)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
                                          dtim_zero, deliver_cab);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        ret = ath10k_wmi_cmd_send_nowait(ar, skb,
                                         ar->wmi.cmd->pdev_send_bcn_cmdid);
        if (ret) {
                dev_kfree_skb(skb);
                return ret;
        }

        return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
                               const struct wmi_wmm_params_all_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_wmm)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_request_stats)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
                         enum wmi_force_fw_hang_type type, u32 delay_ms)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_force_fw_hang)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_dbglog_cfg)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pktlog_enable)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pktlog_disable)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pktlog_disable(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
                               u32 next_offset, u32 enabled)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
                                                   next_offset, enabled);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_get_temperature)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_addba_clear_resp)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                      u32 tid, u32 buf_size)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_addba_send)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                          u32 tid, u32 status)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_addba_set_resp)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                      u32 tid, u32 initiator, u32 reason)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_delba_send)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
                                          reason);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
                    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
                    void *prb_ies, size_t prb_ies_len)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_bcn_tmpl)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
                                        prb_caps, prb_erp, prb_ies,
                                        prb_ies_len);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_prb_tmpl)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
                         const struct wmi_sta_keepalive_arg *arg)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_sta_keepalive)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_enable)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_wow_enable(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->wow_enable_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
                                enum wmi_wow_wakeup_event event,
                                u32 enable)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_add_wakeup_event)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
                           const u8 *pattern, const u8 *mask,
                           int pattern_len, int pattern_offset)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_add_pattern)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
                                               pattern, mask, pattern_len,
                                               pattern_offset);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_del_pattern)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
                                enum wmi_tdls_state state)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_update_fw_tdls_state)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
                            const struct wmi_tdls_peer_update_cmd_arg *arg,
                            const struct wmi_tdls_peer_capab_arg *cap,
                            const struct wmi_channel_arg *chan)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_tdls_peer_update)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_adaptive_qcs)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_get_tpc_config)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
                         char *buf)
{
        if (!ar->wmi.ops->fw_stats_fill)
                return -EOPNOTSUPP;

        ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
        return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
                                    u32 detect_level, u32 detect_margin)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
                                                        detect_level,
                                                        detect_margin);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
                               enum wmi_host_platform_type type,
                               u32 fw_feature_bitmap)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->ext_resource_config)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->ext_resource_config(ar, type,
                                               fw_feature_bitmap);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
        if (!ar->wmi.ops->get_vdev_subtype)
                return -EOPNOTSUPP;

        return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
                                      enum wmi_bss_survey_req_type type)
{
        struct ath10k_wmi *wmi = &ar->wmi;
        struct sk_buff *skb;

        if (!wmi->ops->gen_pdev_bss_chan_info_req)
                return -EOPNOTSUPP;

        skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                                   wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

#endif