/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _CE_H_
#define _CE_H_

#include "hif.h"

/* Maximum number of Copy Engines supported */
#define CE_COUNT_MAX 12

#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096

/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN	8
#define CE_SEND_FLAG_GATHER	0x00010000

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

struct ath10k_ce_pipe;

#define CE_DESC_FLAGS_GATHER		(1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP		(1 << 1)

/* Following desc flags are used in QCA99X0 */
#define CE_DESC_FLAGS_HOST_INT_DIS	(1 << 2)
#define CE_DESC_FLAGS_TGT_INT_DIS	(1 << 3)

#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
#define CE_DESC_FLAGS_META_DATA_LSB  ar->hw_values->ce_desc_meta_data_lsb
struct ce_desc {
	__le32 addr;
	__le16 nbytes;
	__le16 flags; /* %CE_DESC_FLAGS_ */
};
struct ath10k_ce_ring {
	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	unsigned int nentries_mask;

	/*
	 * For dest ring, this is the next index to be processed
	 * by software after it was/is received into.
	 *
	 * For src ring, this is the last descriptor that was sent
	 * and completion processed by software.
	 *
	 * Regardless of src or dest ring, this is an invariant
	 * (modulo ring size):
	 *	write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	/* cached copy */
	unsigned int write_index;

	/*
	 * For src ring, this is the next index not yet processed by HW.
	 * This is a cached copy of the real HW index (read index), used
	 * for avoiding reading the HW index register more often than
	 * necessary.
	 * This extends the invariant:
	 *	write index >= read index >= hw_index >= sw_index
	 *
	 * For dest ring, this is currently unused.
	 */
	/* cached copy */
	unsigned int hw_index;

	/* Start of DMA-coherent area reserved for descriptors */
	/* Host address space */
	void *base_addr_owner_space_unaligned;
	/* CE address space */
	u32 base_addr_ce_space_unaligned;

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	/* Host address space */
	void *base_addr_owner_space;
	/* CE address space */
	u32 base_addr_ce_space;

	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	void *shadow_base_unaligned;
	struct ce_desc *shadow_base;

	/* keep last */
	void *per_transfer_context[0];
};
struct ath10k_ce_pipe {
	struct ath10k *ar;
	unsigned int id;

	unsigned int attr_flags;

	u32 ctrl_addr;

	void (*send_cb)(struct ath10k_ce_pipe *);
	void (*recv_cb)(struct ath10k_ce_pipe *);

	unsigned int src_sz_max;
	struct ath10k_ce_ring *src_ring;
	struct ath10k_ce_ring *dest_ring;
};
/* Copy Engine settable attributes */
struct ce_attr;

/*==================Send====================*/

/* ath10k_ce_send flags */
#define CE_SEND_FLAG_BYTE_SWAP 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 *   ce          - which copy engine to use
 *   buffer      - address of buffer
 *   nbytes      - number of bytes to send
 *   transfer_id - arbitrary ID; reflected to destination
 *   flags       - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_send_context,
		   u32 buffer,
		   unsigned int nbytes,
		   /* 14 bits */
		   unsigned int transfer_id,
		   unsigned int flags);
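
/*
 * Illustrative sketch (assumption, not part of this header): queuing one
 * DMA-mapped buffer with ath10k_ce_send(). "pipe", "skb" and "transfer_id"
 * are hypothetical placeholders; flags of 0 request the CE's default data
 * swap mode, as noted above.
 *
 *	dma_addr_t paddr = dma_map_single(ar->dev, skb->data, skb->len,
 *					  DMA_TO_DEVICE);
 *	if (dma_mapping_error(ar->dev, paddr))
 *		return -EIO;
 *
 *	ret = ath10k_ce_send(pipe, skb, (u32)paddr, skb->len, transfer_id, 0);
 *	if (ret)
 *		dma_unmap_single(ar->dev, paddr, skb->len, DMA_TO_DEVICE);
 */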
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  u32 buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags);

void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);

/*==================Recv=======================*/

int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
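
/*
 * Illustrative sketch (assumption, not part of this header): posting one
 * receive buffer to a pipe's destination ring. "pipe", "skb" and "paddr"
 * are hypothetical placeholders for a freshly allocated, DMA-mapped buffer.
 *
 *	skb = dev_alloc_skb(pipe->src_sz_max);
 *	paddr = dma_map_single(ar->dev, skb->data, pipe->src_sz_max,
 *			       DMA_FROM_DEVICE);
 *	ret = ath10k_ce_rx_post_buf(pipe, skb, (u32)paddr);
 *	if (ret) {
 *		dma_unmap_single(ar->dev, paddr, pipe->src_sz_max,
 *				 DMA_FROM_DEVICE);
 *		dev_kfree_skb_any(skb);
 *	}
 */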
/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED	1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 * Pops buffer from Dest ring.
 */
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp);
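
/*
 * Illustrative sketch (assumption): draining all completed receive
 * descriptors from a pipe. Each successful call returns the per-transfer
 * context that was supplied to ath10k_ce_rx_post_buf() along with the CE
 * buffer address, the received length and the recv flags; "handle_rx" is a
 * hypothetical consumer.
 *
 *	void *ctx;
 *	u32 ce_addr;
 *	unsigned int nbytes, id, flags;
 *
 *	while (ath10k_ce_completed_recv_next(ce_state, &ctx, &ce_addr,
 *					     &nbytes, &id, &flags) == 0)
 *		handle_rx(ctx, nbytes, flags & CE_RECV_FLAG_SWAPPED);
 */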
/*
 * Supply data for the next completed unprocessed send descriptor.
 * Pops 1 completed send buffer from Source ring.
 */
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp);

int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 u32 *bufferp,
					 unsigned int *nbytesp,
					 unsigned int *transfer_idp);

/*==================CE Engine Initialization=======================*/

int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
			const struct ce_attr *attr);
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
			 const struct ce_attr *attr,
			 void (*send_cb)(struct ath10k_ce_pipe *),
			 void (*recv_cb)(struct ath10k_ce_pipe *));
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
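
/*
 * Illustrative sketch (assumption): typical pipe life cycle. Resources are
 * allocated once with ath10k_ce_alloc_pipe() and the hardware ring state is
 * (re)programmed with ath10k_ce_init_pipe(), e.g. on each power-up.
 * "host_ce_config", "my_send_done" and "my_recv_done" are hypothetical.
 *
 *	ret = ath10k_ce_alloc_pipe(ar, ce_id, &host_ce_config[ce_id],
 *				   my_send_done, my_recv_done);
 *	if (ret)
 *		return ret;
 *
 *	ret = ath10k_ce_init_pipe(ar, ce_id, &host_ce_config[ce_id]);
 *	if (ret)
 *		ath10k_ce_free_pipe(ar, ce_id);
 */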
/*==================CE Engine Shutdown=======================*/

/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers. Target DMA must be stopped before using
 * this API.
 */
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp);

int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 u32 *bufferp,
					 unsigned int *nbytesp,
					 unsigned int *transfer_idp,
					 unsigned int *flagsp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends. Target DMA must be stopped before using
 * this API.
 */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp);
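
/*
 * Illustrative sketch (assumption): draining a pipe during shutdown, after
 * target DMA has been stopped. Unused receive buffers and unsent transmit
 * buffers are handed back so the caller can unmap and free them;
 * "free_rx_buf" and "free_tx_buf" are hypothetical cleanup helpers.
 *
 *	void *ctx;
 *	u32 ce_addr;
 *	unsigned int nbytes, id;
 *
 *	while (ath10k_ce_revoke_recv_next(ce_state, &ctx, &ce_addr) == 0)
 *		free_rx_buf(ctx);
 *
 *	while (ath10k_ce_cancel_send_next(ce_state, &ctx, &ce_addr,
 *					  &nbytes, &id) == 0)
 *		free_tx_buf(ctx);
 */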
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_disable_interrupts(struct ath10k *ar);
void ath10k_ce_enable_interrupts(struct ath10k *ar);
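
/*
 * Illustrative sketch (assumption): one plausible interrupt-path pattern is
 * to mask CE interrupts, service every engine with pending work, then
 * re-enable them:
 *
 *	ath10k_ce_disable_interrupts(ar);
 *	ath10k_ce_per_engine_service_any(ar);
 *	ath10k_ce_enable_interrupts(ar);
 */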
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
#define CE_ATTR_NO_SNOOP		1

/* Byte swap data words */
#define CE_ATTR_BYTE_SWAP_DATA		2

/* Swizzle descriptors? */
#define CE_ATTR_SWIZZLE_DESCRIPTORS	4

/* no interrupt on copy completion */
#define CE_ATTR_DIS_INTR		8

/* Attributes of an instance of a Copy Engine */
struct ce_attr {
	/* CE_ATTR_* values */
	unsigned int flags;

	/* #entries in source ring - Must be a power of 2 */
	unsigned int src_nentries;

	/*
	 * Max source send size for this CE.
	 * This is also the minimum size of a destination buffer.
	 */
	unsigned int src_sz_max;

	/* #entries in destination ring - Must be a power of 2 */
	unsigned int dest_nentries;
};
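
/*
 * Illustrative sketch (assumption): a plausible ce_attr entry for a
 * host-to-target (send-only) pipe; dest_nentries stays 0 because no
 * destination ring is needed, and CE_ATTR_DIS_INTR could be added to
 * .flags for a polled pipe.
 *
 *	static const struct ce_attr example_ce_attr = {
 *		.flags = 0,
 *		.src_nentries = 16,
 *		.src_sz_max = 2048,
 *		.dest_nentries = 0,
 *	};
 */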
#define SR_BA_ADDRESS				0x0000
#define SR_SIZE_ADDRESS				0x0004
#define DR_BA_ADDRESS				0x0008
#define DR_SIZE_ADDRESS				0x000c
#define CE_CMD_ADDRESS				0x0018

#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB	17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB	17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK	0x00020000
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
	(((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
	CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)

#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB	16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB	16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK	0x00010000
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
	(((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
	CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
	(((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
	CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)

#define CE_CTRL1_DMAX_LENGTH_MSB		15
#define CE_CTRL1_DMAX_LENGTH_LSB		0
#define CE_CTRL1_DMAX_LENGTH_MASK		0x0000ffff
#define CE_CTRL1_DMAX_LENGTH_GET(x) \
	(((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
#define CE_CTRL1_DMAX_LENGTH_SET(x) \
	(((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
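
/*
 * Illustrative sketch (assumption): the _SET()/_GET() helpers above pack and
 * unpack fields of the per-CE CTRL1 register. Composing a value that limits
 * DMA transfers to 2048 bytes and byte-swaps the source ring might look like
 * this ("ctrl1" is hypothetical):
 *
 *	u32 ctrl1 = CE_CTRL1_DMAX_LENGTH_SET(2048) |
 *		    CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(1);
 *	unsigned int dmax = CE_CTRL1_DMAX_LENGTH_GET(ctrl1);   (yields 2048)
 */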
#define CE_CTRL1_ADDRESS			0x0010
#define CE_CTRL1_HW_MASK			0x0007ffff
#define CE_CTRL1_SW_MASK			0x0007ffff
#define CE_CTRL1_HW_WRITE_MASK			0x00000000
#define CE_CTRL1_SW_WRITE_MASK			0x0007ffff
#define CE_CTRL1_RSTMASK			0xffffffff
#define CE_CTRL1_RESET				0x00000080

#define CE_CMD_HALT_STATUS_MSB			3
#define CE_CMD_HALT_STATUS_LSB			3
#define CE_CMD_HALT_STATUS_MASK			0x00000008
#define CE_CMD_HALT_STATUS_GET(x) \
	(((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
#define CE_CMD_HALT_STATUS_SET(x) \
	(((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
#define CE_CMD_HALT_STATUS_RESET		0
#define CE_CMD_HALT_MSB				0
#define CE_CMD_HALT_MASK			0x00000001

#define HOST_IE_COPY_COMPLETE_MSB		0
#define HOST_IE_COPY_COMPLETE_LSB		0
#define HOST_IE_COPY_COMPLETE_MASK		0x00000001
#define HOST_IE_COPY_COMPLETE_GET(x) \
	(((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
#define HOST_IE_COPY_COMPLETE_SET(x) \
	(((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
#define HOST_IE_COPY_COMPLETE_RESET		0
#define HOST_IE_ADDRESS				0x002c

#define HOST_IS_DST_RING_LOW_WATERMARK_MASK	0x00000010
#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK	0x00000008
#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK	0x00000004
#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK	0x00000002
#define HOST_IS_COPY_COMPLETE_MASK		0x00000001
#define HOST_IS_ADDRESS				0x0030

#define MISC_IE_ADDRESS				0x0034

#define MISC_IS_AXI_ERR_MASK			0x00000400
#define MISC_IS_DST_ADDR_ERR_MASK		0x00000200
#define MISC_IS_SRC_LEN_ERR_MASK		0x00000100
#define MISC_IS_DST_MAX_LEN_VIO_MASK		0x00000080
#define MISC_IS_DST_RING_OVERFLOW_MASK		0x00000040
#define MISC_IS_SRC_RING_OVERFLOW_MASK		0x00000020

#define MISC_IS_ADDRESS				0x0038

#define SR_WR_INDEX_ADDRESS			0x003c
#define DST_WR_INDEX_ADDRESS			0x0040

#define CURRENT_SRRI_ADDRESS			0x0044
#define CURRENT_DRRI_ADDRESS			0x0048

#define SRC_WATERMARK_LOW_MSB			31
#define SRC_WATERMARK_LOW_LSB			16
#define SRC_WATERMARK_LOW_MASK			0xffff0000
#define SRC_WATERMARK_LOW_GET(x) \
	(((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
#define SRC_WATERMARK_LOW_SET(x) \
	(((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
#define SRC_WATERMARK_LOW_RESET			0
#define SRC_WATERMARK_HIGH_MSB			15
#define SRC_WATERMARK_HIGH_LSB			0
#define SRC_WATERMARK_HIGH_MASK			0x0000ffff
#define SRC_WATERMARK_HIGH_GET(x) \
	(((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
#define SRC_WATERMARK_HIGH_SET(x) \
	(((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
#define SRC_WATERMARK_HIGH_RESET		0
#define SRC_WATERMARK_ADDRESS			0x004c

#define DST_WATERMARK_LOW_LSB			16
#define DST_WATERMARK_LOW_MASK			0xffff0000
#define DST_WATERMARK_LOW_SET(x) \
	(((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
#define DST_WATERMARK_LOW_RESET			0
#define DST_WATERMARK_HIGH_MSB			15
#define DST_WATERMARK_HIGH_LSB			0
#define DST_WATERMARK_HIGH_MASK			0x0000ffff
#define DST_WATERMARK_HIGH_GET(x) \
	(((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
#define DST_WATERMARK_HIGH_SET(x) \
	(((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
#define DST_WATERMARK_HIGH_RESET		0
#define DST_WATERMARK_ADDRESS			0x0050

static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
{
	return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
}
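
/*
 * Illustrative sketch (assumption): the register offsets above are relative
 * to the per-engine base returned by ath10k_ce_base_address(), so a pipe's
 * control address is typically derived as:
 *
 *	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
 */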
#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK  | \
			   HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
			   HOST_IS_DST_RING_LOW_WATERMARK_MASK  | \
			   HOST_IS_DST_RING_HIGH_WATERMARK_MASK)

#define CE_ERROR_MASK	(MISC_IS_AXI_ERR_MASK           | \
			 MISC_IS_DST_ADDR_ERR_MASK      | \
			 MISC_IS_SRC_LEN_ERR_MASK       | \
			 MISC_IS_DST_MAX_LEN_VIO_MASK   | \
			 MISC_IS_DST_RING_OVERFLOW_MASK | \
			 MISC_IS_SRC_RING_OVERFLOW_MASK)

#define CE_SRC_RING_TO_DESC(baddr, idx) \
	(&(((struct ce_desc *)baddr)[idx]))

#define CE_DEST_RING_TO_DESC(baddr, idx) \
	(&(((struct ce_desc *)baddr)[idx]))

/* Ring arithmetic (modulus number of entries in ring, which is a power of 2). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
	(((int)(toidx)-(int)(fromidx)) & (nentries_mask))

#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
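
/*
 * Worked example (assumption, for illustration): with a 512-entry ring,
 * nentries_mask is 511. If sw_index is 500 and write_index has wrapped
 * around to 10, then
 *
 *	CE_RING_DELTA(511, 500, 10) == (10 - 500) & 511 == 22
 *
 * i.e. 22 descriptors are outstanding, consistent with the invariant
 * documented in struct ath10k_ce_ring.
 */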
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
	ar->regs->ce_wrap_intr_sum_host_msi_lsb
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \
	ar->regs->ce_wrap_intr_sum_host_msi_mask
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
	(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
		CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS	0x0000

#define CE_INTERRUPT_SUMMARY(ar) \
	CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
		ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
		CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))

#endif /* _CE_H_ */