/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;
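
/*
 * Table of WMI operations, installed at ar->wmi.ops, so that different WMI
 * ABI revisions can plug in their own implementations. The gen_* ops build
 * command buffers for the firmware, the pull_* ops parse event buffers
 * received from it, and the remaining ops cover RX dispatch, service-map
 * translation and a few queries. Any op may be left NULL when the command
 * or event is not supported by a given implementation.
 */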
struct wmi_ops {
        void (*rx)(struct ath10k *ar, struct sk_buff *skb);
        void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

        int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_scan_ev_arg *arg);
        int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_mgmt_rx_ev_arg *arg);
        int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_ch_info_ev_arg *arg);
        int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_vdev_start_ev_arg *arg);
        int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_peer_kick_ev_arg *arg);
        int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_swba_ev_arg *arg);
        int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_phyerr_hdr_arg *arg);
        int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
                        int left_len, struct wmi_phyerr_ev_arg *arg);
        int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_svc_rdy_ev_arg *arg);
        int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_rdy_ev_arg *arg);
        int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
                        struct ath10k_fw_stats *stats);
        int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_roam_ev_arg *arg);
        int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
                        struct wmi_wow_ev_arg *arg);
        enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

        struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
        struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
        struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
                        u16 rd5g, u16 ctl2g, u16 ctl5g,
                        enum wmi_dfs_region dfs_reg);
        struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
                        u32 value);
        struct sk_buff *(*gen_init)(struct ath10k *ar);
        struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
                        const struct wmi_start_scan_arg *arg);
        struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
                        const struct wmi_stop_scan_arg *arg);
        struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
                        enum wmi_vdev_type type,
                        enum wmi_vdev_subtype subtype,
                        const u8 macaddr[ETH_ALEN]);
        struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
        struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
                        const struct wmi_vdev_start_request_arg *arg,
                        bool restart);
        struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
        struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
                        const u8 *bssid);
        struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
        struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
                        u32 param_id, u32 param_value);
        struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
                        const struct wmi_vdev_install_key_arg *arg);
        struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
                        const struct wmi_vdev_spectral_conf_arg *arg);
        struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
                        u32 trigger, u32 enable);
        struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
                        const struct wmi_wmm_params_all_arg *arg);
        struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
                        const u8 peer_addr[ETH_ALEN],
                        enum wmi_peer_type peer_type);
        struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
                        const u8 peer_addr[ETH_ALEN]);
        struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
                        const u8 peer_addr[ETH_ALEN],
                        u32 tid_bitmap);
        struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
                        const u8 *peer_addr,
                        enum wmi_peer_param param_id,
                        u32 param_value);
        struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
                        const struct wmi_peer_assoc_complete_arg *arg);
        struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
                        enum wmi_sta_ps_mode psmode);
        struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
                        enum wmi_sta_powersave_param param_id,
                        u32 value);
        struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
                        const u8 *mac,
                        enum wmi_ap_ps_peer_param param_id,
                        u32 value);
        struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
                        const struct wmi_scan_chan_list_arg *arg);
        struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
                        const void *bcn, size_t bcn_len,
                        u32 bcn_paddr, bool dtim_zero,
                        bool deliver_cab);
        struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
                        const struct wmi_wmm_params_all_arg *arg);
        struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
        struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
                        enum wmi_force_fw_hang_type type,
                        u32 delay_ms);
        struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
        struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
                        u32 log_level);
        struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
        struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
        struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
                        u32 period, u32 duration,
                        u32 next_offset,
                        u32 enabled);
        struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
        struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
                        const u8 *mac);
        struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
                        const u8 *mac, u32 tid, u32 buf_size);
        struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
                        const u8 *mac, u32 tid,
                        u32 status);
        struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
                        const u8 *mac, u32 tid, u32 initiator,
                        u32 reason);
        struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
                        u32 tim_ie_offset, struct sk_buff *bcn,
                        u32 prb_caps, u32 prb_erp,
                        void *prb_ies, size_t prb_ies_len);
        struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
                        struct sk_buff *bcn);
        struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
                        const u8 *p2p_ie);
        struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
                        const u8 peer_addr[ETH_ALEN],
                        const struct wmi_sta_uapsd_auto_trig_arg *args,
                        u32 num_ac);
        struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
                        const struct wmi_sta_keepalive_arg *arg);
        struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
        struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
                        enum wmi_wow_wakeup_event event,
                        u32 enable);
        struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
        struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
                        u32 pattern_id,
                        const u8 *pattern,
                        const u8 *mask,
                        int pattern_len,
                        int pattern_offset);
        struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
                        u32 pattern_id);
        struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
                        u32 vdev_id,
                        enum wmi_tdls_state state);
        struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
                        const struct wmi_tdls_peer_update_cmd_arg *arg,
                        const struct wmi_tdls_peer_capab_arg *cap,
                        const struct wmi_channel_arg *chan);
        struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
        struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
                        u32 param);
        void (*fw_stats_fill)(struct ath10k *ar,
                        struct ath10k_fw_stats *fw_stats,
                        char *buf);
        struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
                        u8 enable,
                        u32 detect_level,
                        u32 detect_margin);
        struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
                        enum wmi_host_platform_type type,
                        u32 fw_feature_bitmap);
        int (*get_vdev_subtype)(struct ath10k *ar,
                        enum wmi_vdev_subtype subtype);
};
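
/*
 * Each inline helper below wraps a single op and returns -EOPNOTSUPP when
 * that op is not provided. The command helpers build an skb via the
 * corresponding gen_* op and hand it to ath10k_wmi_cmd_send() together with
 * the matching command id from ar->wmi.cmd; the pull helpers simply delegate
 * event parsing. A minimal sketch of the calling pattern (illustrative only,
 * not code from this file):
 *
 *      ret = ath10k_wmi_pdev_set_param(ar, param_id, param_value);
 *      if (ret)
 *              return ret;
 */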

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
        if (WARN_ON_ONCE(!ar->wmi.ops->rx))
                return -EOPNOTSUPP;

        ar->wmi.ops->rx(ar, skb);
        return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
                size_t len)
{
        if (!ar->wmi.ops->map_svc)
                return -EOPNOTSUPP;

        ar->wmi.ops->map_svc(in, out, len);
        return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_scan_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_scan)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_mgmt_rx_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_mgmt_rx)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_ch_info_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_ch_info)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_vdev_start_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_vdev_start)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_peer_kick_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_peer_kick)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_swba_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_swba)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_phyerr_hdr_arg *arg)
{
        if (!ar->wmi.ops->pull_phyerr_hdr)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
                int left_len, struct wmi_phyerr_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_phyerr)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_svc_rdy_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_svc_rdy)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_rdy_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_rdy)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
                struct ath10k_fw_stats *stats)
{
        if (!ar->wmi.ops->pull_fw_stats)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_roam_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_roam_ev)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_wow_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_wow_event)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}
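
/* Unlike the other helpers, a missing get_txbf_conf_scheme op is reported
 * as WMI_TXBF_CONF_UNSUPPORTED rather than as an error code.
 */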
static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
        if (!ar->wmi.ops->get_txbf_conf_scheme)
                return WMI_TXBF_CONF_UNSUPPORTED;

        return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
        struct sk_buff *skb;
        int ret;

        if (!ar->wmi.ops->gen_mgmt_tx)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
        if (ret)
                return ret;

        /* FIXME There's no ACK event for Management Tx. This probably
         * shouldn't be called here either. */
        info->flags |= IEEE80211_TX_STAT_ACK;
        ieee80211_tx_status_irqsafe(ar->hw, msdu);

        return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
                u16 ctl2g, u16 ctl5g,
                enum wmi_dfs_region dfs_reg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_rd)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
                dfs_reg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_suspend)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_resume)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_resume(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_param)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_init)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_init(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
                const struct wmi_start_scan_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_start_scan)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_start_scan(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_stop_scan)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_stop_scan(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
                enum wmi_vdev_type type,
                enum wmi_vdev_subtype subtype,
                const u8 macaddr[ETH_ALEN])
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_create)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_delete)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
                const struct wmi_vdev_start_request_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_start)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
                const struct wmi_vdev_start_request_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_start)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_stop)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_up)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_down)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
                u32 param_value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_set_param)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
                param_value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
                const struct wmi_vdev_install_key_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_install_key)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->vdev_install_key_cmdid);
}
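
/* The spectral-scan and vdev WMM helpers below dispatch to their ops without
 * a NULL check, so they rely on the installed op table providing them.
 */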
static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
                const struct wmi_vdev_spectral_conf_arg *arg)
{
        struct sk_buff *skb;
        u32 cmd_id;

        skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
                u32 enable)
{
        struct sk_buff *skb;
        u32 cmd_id;

        skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
                enable);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN],
                const struct wmi_sta_uapsd_auto_trig_arg *args,
                u32 num_ac)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_vdev_sta_uapsd)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
                num_ac);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
                const struct wmi_wmm_params_all_arg *arg)
{
        struct sk_buff *skb;
        u32 cmd_id;

        skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN],
                enum wmi_peer_type peer_type)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_create)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN])
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_delete)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_flush)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
                enum wmi_peer_param param_id, u32 param_value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_set_param)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
                param_value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
                enum wmi_sta_ps_mode psmode)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_set_psmode)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
                enum wmi_sta_powersave_param param_id, u32 value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_set_sta_ps)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                enum wmi_ap_ps_peer_param param_id, u32 value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_set_ap_ps)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
                const struct wmi_scan_chan_list_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_scan_chan_list)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
                const struct wmi_peer_assoc_complete_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_assoc)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
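
/* Beacon data is submitted through ath10k_wmi_cmd_send_nowait(); if that
 * fails, the generated skb is freed here before returning the error.
 */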
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
                const void *bcn, size_t bcn_len,
                u32 bcn_paddr, bool dtim_zero,
                bool deliver_cab)
{
        struct sk_buff *skb;
        int ret;

        if (!ar->wmi.ops->gen_beacon_dma)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
                dtim_zero, deliver_cab);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        ret = ath10k_wmi_cmd_send_nowait(ar, skb,
                ar->wmi.cmd->pdev_send_bcn_cmdid);
        if (ret) {
                dev_kfree_skb(skb);
                return ret;
        }

        return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
                const struct wmi_wmm_params_all_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_wmm)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_request_stats)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
                enum wmi_force_fw_hang_type type, u32 delay_ms)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_force_fw_hang)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_dbglog_cfg)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pktlog_enable)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pktlog_disable)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pktlog_disable(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
                u32 next_offset, u32 enabled)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
                next_offset, enabled);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_get_temperature)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_addba_clear_resp)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                u32 tid, u32 buf_size)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_addba_send)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                u32 tid, u32 status)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_addba_set_resp)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                u32 tid, u32 initiator, u32 reason)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_delba_send)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
                reason);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
                struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
                void *prb_ies, size_t prb_ies_len)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_bcn_tmpl)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
                prb_caps, prb_erp, prb_ies,
                prb_ies_len);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_prb_tmpl)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
                const struct wmi_sta_keepalive_arg *arg)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_sta_keepalive)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_enable)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_wow_enable(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->wow_enable_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
                enum wmi_wow_wakeup_event event,
                u32 enable)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_add_wakeup_event)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
                const u8 *pattern, const u8 *mask,
                int pattern_len, int pattern_offset)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_add_pattern)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
                pattern, mask, pattern_len,
                pattern_offset);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_del_pattern)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
                enum wmi_tdls_state state)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_update_fw_tdls_state)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
                const struct wmi_tdls_peer_update_cmd_arg *arg,
                const struct wmi_tdls_peer_capab_arg *cap,
                const struct wmi_channel_arg *chan)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_tdls_peer_update)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_adaptive_qcs)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_get_tpc_config)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
                char *buf)
{
        if (!ar->wmi.ops->fw_stats_fill)
                return -EOPNOTSUPP;

        ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
        return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
                u32 detect_level, u32 detect_margin)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
                detect_level,
                detect_margin);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
                enum wmi_host_platform_type type,
                u32 fw_feature_bitmap)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->ext_resource_config)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->ext_resource_config(ar, type,
                fw_feature_bitmap);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
        if (!ar->wmi.ops->get_vdev_subtype)
                return -EOPNOTSUPP;

        return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

#endif
  1086. #endif