/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

struct iwl_host_cmd;
/* This file includes the declarations that are internal to the
 * trans_pcie layer */
struct iwl_rx_mem_buffer {
        dma_addr_t page_dma;
        struct page *page;
        struct list_head list;
};
/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
        u32 hw;
        u32 sw;
        u32 err_code;
        u32 sch;
        u32 alive;
        u32 rfkill;
        u32 ctkill;
        u32 wakeup;
        u32 rx;
        u32 tx;
        u32 unhandled;
};
/**
 * struct iwl_rxq - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @pool: initial pool of iwl_rx_mem_buffer instances owned by the queue
 * @queue: array of pointers into @pool, forming the actual Rx ring
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @write_actual: last write index actually handed to the device
 * @rx_free: list of free SKBs for use
 * @rx_used: List of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the members of this structure
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
        __le32 *bd;
        dma_addr_t bd_dma;
        struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
        struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
        u32 read;
        u32 write;
        u32 free_count;
        u32 write_actual;
        struct list_head rx_free;
        struct list_head rx_used;
        bool need_update;
        struct iwl_rb_status *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
};
struct iwl_dma_ptr {
        dma_addr_t dma;
        void *addr;
        size_t size;
};
/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
        return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
        return --index & (TFD_QUEUE_SIZE_MAX - 1);
}
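
/*
 * Worked example (illustrative only): with TFD_QUEUE_SIZE_MAX == 256 (see the
 * generic queue comment below), the queue size is a power of two, so the two
 * helpers above reduce to a simple mask: iwl_queue_inc_wrap(255) == 0 and
 * iwl_queue_dec_wrap(0) == 255, i.e. the index walks the ring with no
 * branches.
 */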
struct iwl_cmd_meta {
        /* only for SYNC commands, iff the reply skb is wanted */
        struct iwl_host_cmd *source;
        u32 flags;
};
/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
 * (barring future HW changes). For the normal TX queues, n_window, which is
 * the size of the software queue data, is also 256; however, for the command
 * queue, n_window is only 32 since we don't need so many commands pending.
 * Since the HW still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays
 * 256. As a result, the software buffers (in the variable @entries in struct
 * iwl_txq) only have 32 entries, while the HW buffers (@tfds in the same
 * struct) have 256.
 * This means that we end up with the following:
 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 * SW entries: | 0 |      ...     | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_queue {
        int write_ptr;          /* 1-st empty entry (index) host_w */
        int read_ptr;           /* last used entry (index) host_r */
        /* used for monitoring and recovering a stuck queue */
        dma_addr_t dma_addr;    /* physical addr for BD's */
        int n_window;           /* safe queue window */
        u32 id;
        int low_mark;           /* low watermark, resume queue if free
                                 * space is more than this */
        int high_mark;          /* high watermark, stop queue if free
                                 * space is less than this */
};
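
/*
 * Illustrative sketch (not part of the driver API): because n_window is a
 * power of two, a HW ring index maps onto the SW window simply by masking,
 * which is what get_cmd_index() below does. For the command queue
 * (n_window == TFD_CMD_SLOTS == 32), HW index 37 therefore uses SW entry
 * 37 & 31 == 5.
 */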
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need
 * to copy some data into the buffer regardless of whether
 * it should be mapped or not. This indicates how big the
 * first TB must be to include the scratch buffer. Since
 * the scratch is 4 bytes at offset 12, it's 16 now. If we
 * make it bigger then allocations will be bigger and copy
 * slower, so that's probably not useful.
 */
#define IWL_HCMD_SCRATCHBUF_SIZE 16
struct iwl_pcie_txq_entry {
        struct iwl_device_cmd *cmd;
        struct sk_buff *skb;
        /* buffer to free after command completes */
        const void *free_buf;
        struct iwl_cmd_meta meta;
};

struct iwl_pcie_txq_scratch_buf {
        struct iwl_cmd_header hdr;
        u8 buf[8];
        __le32 scratch;
};
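
/*
 * Layout sketch (informal): struct iwl_cmd_header is 4 bytes, so @scratch
 * lands at offset 12 and the whole structure is 16 bytes, matching
 * IWL_HCMD_SCRATCHBUF_SIZE and the "4 bytes at offset 12" comment above.
 * A compile-time check along these lines (placed in a function, e.g. during
 * TX init) could assert that assumption:
 *
 *      BUILD_BUG_ON(sizeof(struct iwl_pcie_txq_scratch_buf) !=
 *                   IWL_HCMD_SCRATCHBUF_SIZE);
 */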
/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @scratchbufs: start of command headers, including scratch buffers, for
 *      the writeback -- this is DMA memory and an array holding one buffer
 *      for each command on the queue
 * @scratchbufs_dma: DMA address for the scratchbufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @active: stores if queue is active
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit
 * frame descriptors) and required locking structures.
 */
struct iwl_txq {
        struct iwl_queue q;
        struct iwl_tfd *tfds;
        struct iwl_pcie_txq_scratch_buf *scratchbufs;
        dma_addr_t scratchbufs_dma;
        struct iwl_pcie_txq_entry *entries;
        spinlock_t lock;
        struct timer_list stuck_timer;
        struct iwl_trans_pcie *trans_pcie;
        bool need_update;
        u8 active;
        bool ampdu;
};
static inline dma_addr_t
iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
{
        return txq->scratchbufs_dma +
               sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
}
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_replenish: work that will be called when buffers need to be allocated
 * @drv: pointer to iwl_drv
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size_8k: 8 kB RX buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @rx_page_order: page order for receive buffer size
 * @wd_timeout: queue watchdog timeout (jiffies)
 * @reg_lock: protect hw register access
 * @cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 */
struct iwl_trans_pcie {
        struct iwl_rxq rxq;
        struct work_struct rx_replenish;
        struct iwl_trans *trans;
        struct iwl_drv *drv;

        struct net_device napi_dev;
        struct napi_struct napi;

        /* INT ICT Table */
        __le32 *ict_tbl;
        dma_addr_t ict_tbl_dma;
        int ict_index;
        bool use_ict;
        struct isr_statistics isr_stats;

        spinlock_t irq_lock;
        u32 inta_mask;
        u32 scd_base_addr;
        struct iwl_dma_ptr scd_bc_tbls;
        struct iwl_dma_ptr kw;

        struct iwl_txq *txq;
        unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
        unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

        /* PCI bus related data */
        struct pci_dev *pci_dev;
        void __iomem *hw_base;

        bool ucode_write_complete;
        wait_queue_head_t ucode_write_waitq;
        wait_queue_head_t wait_command_queue;

        u8 cmd_queue;
        u8 cmd_fifo;
        u8 n_no_reclaim_cmds;
        u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

        bool rx_buf_size_8k;
        bool bc_table_dword;
        bool scd_set_active;
        u32 rx_page_order;

        const char *const *command_names;

        /* queue watchdog */
        unsigned long wd_timeout;

        /* protect hw register access */
        spinlock_t reg_lock;
        bool cmd_in_flight;

        dma_addr_t fw_mon_phys;
        struct page *fw_mon_page;
        u32 fw_mon_size;
};
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
        ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
        return container_of((void *)trans_pcie, struct iwl_trans,
                            trans_specific);
}
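
/*
 * Illustrative sketch (not part of the driver API): the macro and the helper
 * above are inverses of each other, so converting back and forth returns the
 * original pointer:
 *
 *      struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *      WARN_ON(iwl_trans_pcie_get_trans(trans_pcie) != trans);
 */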
/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *      Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                                       const struct pci_device_id *ent,
                                       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);
/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
                               const struct iwl_trans_txq_scd_cfg *cfg);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
                                bool configure_scd);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
                            struct iwl_rx_cmd_buffer *rxb, int handler_status);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                            struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];

        return le16_to_cpu(tb->hi_n_len) >> 4;
}
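
/*
 * Note on the encoding (informal sketch; see iwl-fh.h for the authoritative
 * TFD TB layout): hi_n_len carries the highest bits of the DMA address in its
 * low nibble and the TB length in the remaining 12 bits, which is why the
 * length is recovered with a shift by 4 above. Building the field would
 * roughly be the inverse:
 *
 *      u16 hi_n_len = len << 4;                        // 12-bit length
 *      if (sizeof(dma_addr_t) > sizeof(u32))
 *              hi_n_len |= ((addr >> 16) >> 16) & 0xF; // high address bits
 *      tb->hi_n_len = cpu_to_le16(hi_n_len);
 */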
/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
        clear_bit(STATUS_INT_ENABLED, &trans->status);

        /* disable interrupts from uCode/NIC to host */
        iwl_write32(trans, CSR_INT_MASK, 0x00000000);

        /* acknowledge/clear/reset any interrupts still pending
         * from uCode or flow handler (Rx/Tx DMA) */
        iwl_write32(trans, CSR_INT, 0xffffffff);
        iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
        IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
        set_bit(STATUS_INT_ENABLED, &trans->status);
        trans_pcie->inta_mask = CSR_INI_SET_MASK;
        iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
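
/*
 * Usage sketch (illustrative, not a prescribed pattern): callers generally
 * mask interrupts under the transport's irq_lock, for instance while the
 * device is being stopped:
 *
 *      spin_lock(&trans_pcie->irq_lock);
 *      iwl_disable_interrupts(trans);
 *      spin_unlock(&trans_pcie->irq_lock);
 */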
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
        trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
        iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_wake_queue(struct iwl_trans *trans,
                                  struct iwl_txq *txq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
                IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
                iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
        }
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
                                  struct iwl_txq *txq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
                iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
                IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
        } else {
                IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
                                    txq->q.id);
        }
}
static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
{
        return q->write_ptr >= q->read_ptr ?
                (i >= q->read_ptr && i < q->write_ptr) :
                !(i < q->read_ptr && i >= q->write_ptr);
}
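
/*
 * Worked example (illustrative): with read_ptr == 250 and write_ptr == 5 the
 * ring has wrapped, so indices 250..255 and 0..4 are in use. The wrapped
 * branch above therefore reports iwl_queue_used(q, 253) as true and
 * iwl_queue_used(q, 100) as false.
 */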
static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
        return index & (q->n_window - 1);
}

static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
                                         u8 cmd)
{
        if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
                return "UNKNOWN";
        return trans_pcie->command_names[cmd];
}

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
        return !(iwl_read32(trans, CSR_GP_CNTRL) &
                 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
                                                  u32 reg, u32 mask, u32 value)
{
        u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
        WARN_ON_ONCE(value & ~mask);
#endif

        v = iwl_read32(trans, reg);
        v &= ~mask;
        v |= value;
        iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
                                              u32 reg, u32 mask)
{
        __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
                                            u32 reg, u32 mask)
{
        __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
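
/*
 * Usage sketch (illustrative): the helpers above implement a plain
 * read-modify-write. For example, requesting MAC access could be written as
 *
 *      __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
 *                               CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 *
 * with the matching release via __iwl_trans_pcie_clear_bit() on the same
 * register and mask. The read-modify-write itself is not atomic; callers
 * serialize register access (e.g. with reg_lock) where that matters.
 */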
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#endif /* __iwl_trans_int_pcie_h__ */