mt76.h 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445
  1. /*
  2. * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <net/mac80211.h>
#include "util.h"

/* Default DMA ring sizes (in descriptors) and RX DMA buffer size (bytes) */
#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
struct mt76_dev;

/*
 * Bus abstraction: register read / write / read-modify-write and a bulk
 * copy into device address space, so core code works over any bus type.
 */
struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);		/* register read */
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);	/* register write */
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	/* bulk write of 'len' bytes from 'data' starting at 'offset' */
	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
		     int len);
};
/*
 * TX queue ids. The first four are pinned to the mac80211 access-category
 * indices; the remainder are driver-internal queues.
 */
enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,	/* voice */
	MT_TXQ_VI = IEEE80211_AC_VI,	/* video */
	MT_TXQ_BE = IEEE80211_AC_BE,	/* best effort */
	MT_TXQ_BK = IEEE80211_AC_BK,	/* background */
	MT_TXQ_PSD,			/* presumably powersave delivery — confirm vs. hw docs */
	MT_TXQ_MCU,			/* commands to the MCU/firmware */
	MT_TXQ_BEACON,
	MT_TXQ_CAB,			/* presumably content-after-beacon (mcast after DTIM) */
	__MT_TXQ_MAX			/* number of TX queues */
};
/* RX queue ids: one data queue plus one MCU/event queue */
enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX	/* number of RX queues */
};
/* One DMA-mapped buffer fragment handed to the queue layer */
struct mt76_queue_buf {
	dma_addr_t addr;	/* bus address of the mapped buffer */
	int len;		/* buffer length in bytes */
};
/*
 * Per-descriptor software state. 'buf' and 'skb' alias the same slot:
 * RX entries track a raw buffer, TX entries track the queued skb.
 */
struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	struct mt76_txwi_cache *txwi;	/* TX descriptor cache entry, if any */
	bool schedule;			/* flag used by tx scheduling — TODO confirm exact meaning */
};
/*
 * Hardware layout of one DMA ring's control registers; must match the
 * device exactly, hence __packed/__aligned.
 */
struct mt76_queue_regs {
	u32 desc_base;	/* ring base (bus) address */
	u32 ring_size;	/* number of descriptors */
	u32 cpu_idx;	/* CPU (software) index */
	u32 dma_idx;	/* DMA (hardware) index */
} __packed __aligned(4);
/* One DMA descriptor ring (TX or RX) plus its software bookkeeping */
struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;	/* mapped ring control registers */

	spinlock_t lock;
	struct mt76_queue_entry *entry;	/* per-descriptor software state */
	struct mt76_desc *desc;		/* descriptor array (DMA coherent) */

	struct list_head swq;		/* software tx queue list */
	int swq_queued;

	u16 head;			/* producer index */
	u16 tail;			/* consumer index */
	int ndesc;			/* total descriptors in the ring */
	int queued;			/* descriptors currently in flight */
	int buf_size;
	u8 buf_offset;
	u8 hw_idx;			/* hardware queue index */

	dma_addr_t desc_dma;		/* bus address of 'desc' */
	struct sk_buff *rx_head;	/* head of a partially reassembled rx frame */
};
/* Queue backend operations (see the mt76_queue_* wrapper macros below) */
struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);

	/* add 'nbufs' mapped buffers as one frame; returns ring slot or error */
	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
		       struct mt76_queue_buf *buf, int nbufs, u32 info,
		       struct sk_buff *skb, void *txwi);

	/* pop one completed entry; 'flush' forces dequeue regardless of DMA state */
	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	/* notify hardware about newly queued descriptors */
	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};
/* Bit numbers for mt76_wcid.flags (used with set_bit/test_bit style access) */
enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,	/* powersave state needs verification */
	MT_WCID_FLAG_PS,	/* station is in powersave */
};
/* Per-station (WCID = wireless client id) hardware table entry state */
struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];	/* rx aggregation state per TID */

	struct work_struct aggr_work;

	unsigned long flags;	/* enum mt76_wcid_flags bits */

	u8 idx;			/* hardware WCID table index */
	u8 hw_key_idx;		/* hardware key slot */

	u8 sta:1;		/* set when backed by an ieee80211_sta (see wcid_to_sta) */

	u8 rx_check_pn;		/* verify packet numbers on rx */
	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];	/* last rx PN per TID */

	__le16 tx_rate;
	bool tx_rate_set;
	u8 tx_rate_nss;
	s8 max_txpwr_adj;
	bool sw_iv;		/* IV is generated in software */
};
/* Driver-private part of an ieee80211_txq (see mtxq_to_txq) */
struct mt76_txq {
	struct list_head list;		/* membership in mt76_queue.swq */
	struct mt76_queue *hwq;		/* hardware queue this txq feeds */
	struct mt76_wcid *wcid;		/* owning station entry */

	struct sk_buff_head retry_q;	/* frames waiting for retransmission */

	u16 agg_ssn;			/* aggregation starting sequence number */
	bool send_bar;			/* a BlockAckReq needs to be sent */
	bool aggr;			/* aggregation enabled on this TID */
};
/* Cached, DMA-mapped TX descriptor (TXWI) buffer; recycled via 'list' */
struct mt76_txwi_cache {
	u32 txwi[8];		/* raw TXWI contents */
	dma_addr_t dma_addr;	/* bus address of this entry */
	struct list_head list;	/* free-list linkage (mt76_dev.txwi_cache) */
};
/* RX reorder state for one aggregation session (one TID of one station) */
struct mt76_rx_tid {
	struct rcu_head rcu_head;	/* freed under RCU (see mt76_wcid.aggr) */

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;	/* flushes stale held frames */

	u16 head;	/* expected sequence number */
	u8 size;	/* reorder window size */
	u8 nframes;	/* frames currently buffered */

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];	/* window of out-of-order frames ('size' slots) */
};
/* Bit numbers for mt76_dev.state */
enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_SCANNING,
	MT76_RESET,
};
/* Band support reported by the hardware/EEPROM */
struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};
/* Chip-specific callbacks implemented by each mt76 driver */
struct mt76_driver_ops {
	u16 txwi_size;		/* size of the hardware TX descriptor (TXWI) */

	void (*update_survey)(struct mt76_dev *dev);

	/* fill the TXWI for 'skb'; 'tx_info' is in/out driver info */
	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      struct sk_buff *skb, struct mt76_queue *q,
			      struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta, u32 *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
				struct mt76_queue_entry *e, bool flush);

	/* process one received frame from queue 'q' */
	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	/* powersave state change notification for 'sta' */
	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);
};
/* Accumulated channel-time counters used for survey reporting */
struct mt76_channel_state {
	u64 cc_active;	/* time the channel was active */
	u64 cc_busy;	/* time the channel was busy */
};

/* A supported band plus per-channel state, indexed like sband.channels */
struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};
/* Core per-device state shared by all mt76 drivers */
struct mt76_dev {
	struct ieee80211_hw *hw;
	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	spinlock_t lock;
	spinlock_t cc_lock;	/* protects channel-time counters */

	const struct mt76_bus_ops *bus;		/* register access backend */
	const struct mt76_driver_ops *drv;	/* chip-specific callbacks */
	void __iomem *regs;
	struct device *dev;

	/* dummy netdev + per-RX-queue NAPI contexts and pending skb lists */
	struct net_device napi_dev;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;	/* free mt76_txwi_cache entries */
	struct mt76_queue q_tx[__MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;

	u8 macaddr[ETH_ALEN];
	u32 rev;		/* chip id (high 16 bits) + revision (low 16 bits) */
	unsigned long state;	/* MT76_STATE_* bits */

	u8 antenna_mask;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;	/* one-time-programmable memory blob */
	struct mt76_hw_cap cap;

	u32 debugfs_reg;	/* register offset selected via debugfs */

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;		/* LED is active-low */
	u8 led_pin;
};
/* PHY modulation types as encoded in hardware rate descriptors */
enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,	/* HT greenfield */
	MT_PHY_TYPE_VHT,
};
/*
 * Per-rate TX power table. The named arrays overlay 'all' exactly
 * (4 + 8 + 16 + 10 = 38 entries), allowing uniform iteration.
 */
struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[38];
	};
};
/*
 * Per-frame RX metadata carried in the skb until handed to mac80211.
 * Most fields mirror struct ieee80211_rx_status counterparts.
 */
struct mt76_rx_status {
	struct mt76_wcid *wcid;		/* sender's station entry, if known */

	unsigned long reorder_time;	/* timestamp for reorder-buffer timeout */

	u8 iv[6];			/* received IV, used when decrypting in sw */

	u8 aggr:1;			/* frame is part of an A-MPDU */
	u8 tid;
	u16 seqno;

	u16 freq;			/* center frequency in MHz */
	u32 flag;			/* RX_FLAG_* for mac80211 */
	u8 enc_flags;
	u8 encoding:2, bw:3;
	u8 rate_idx;
	u8 nss;
	u8 band;
	u8 signal;
	u8 chains;			/* bitmask of antennas with a valid signal */
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};
  245. #define mt76_rr(dev, ...) (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
  246. #define mt76_wr(dev, ...) (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
  247. #define mt76_rmw(dev, ...) (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
  248. #define mt76_wr_copy(dev, ...) (dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)
  249. #define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val)
  250. #define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0)
  251. #define mt76_get_field(_dev, _reg, _field) \
  252. FIELD_GET(_field, mt76_rr(dev, _reg))
  253. #define mt76_rmw_field(_dev, _reg, _field, _val) \
  254. mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
  255. #define mt76_hw(dev) (dev)->mt76.hw
/* Poll a register until (value & mask) == val or 'timeout' expires;
 * returns true on success. The msec variant uses a coarser interval. */
bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

/* Install MMIO-based bus ops using the mapped register window 'regs' */
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
  263. static inline u16 mt76_chip(struct mt76_dev *dev)
  264. {
  265. return dev->rev >> 16;
  266. }
  267. static inline u16 mt76_rev(struct mt76_dev *dev)
  268. {
  269. return dev->rev & 0xffff;
  270. }
/* Convenience wrappers taking the driver-private struct that embeds mt76 */
#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

/* Dispatch helpers into the installed queue backend (struct mt76_queue_ops) */
#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_add_buf(dev, ...) (dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
  279. static inline struct mt76_channel_state *
  280. mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
  281. {
  282. struct mt76_sband *msband;
  283. int idx;
  284. if (c->band == NL80211_BAND_2GHZ)
  285. msband = &dev->sband_2g;
  286. else
  287. msband = &dev->sband_5g;
  288. idx = c - &msband->sband.channels[0];
  289. return &msband->chan[idx];
  290. }
/* Device registration with mac80211; 'rates' is the legacy rate table */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);

/* Load 'len' bytes of EEPROM data and apply any user overrides */
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
  297. static inline struct ieee80211_txq *
  298. mtxq_to_txq(struct mt76_txq *mtxq)
  299. {
  300. void *ptr = mtxq;
  301. return container_of(ptr, struct ieee80211_txq, drv_priv);
  302. }
  303. static inline struct ieee80211_sta *
  304. wcid_to_sta(struct mt76_wcid *wcid)
  305. {
  306. void *ptr = wcid;
  307. if (!wcid || !wcid->sta)
  308. return NULL;
  309. return container_of(ptr, struct ieee80211_sta, drv_priv);
  310. }
/* TX path */
int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta);

/* RX path: queue a received frame from rx queue 'q' for processing */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);

/* Software tx queue management and scheduling */
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);

/* Channel / survey handling */
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

/* RX aggregation (reordering) sessions */
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

/* internal */
void mt76_tx_free(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      int queue);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
#endif