ce.c

/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */

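/*
 * Register-access helpers. Each copy engine exposes a block of control
 * registers at ce_ctrl_addr; the inline helpers below are thin wrappers
 * around ath10k_pci_read32()/ath10k_pci_write32() for the individual
 * ring-index, base-address, size, watermark and interrupt-enable
 * registers of that block.
 */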
static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
		u32 ce_ctrl_addr,
		unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
		u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
		u32 ce_ctrl_addr,
		unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
		u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
		u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
		u32 ce_ctrl_addr,
		unsigned int addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
		u32 ce_ctrl_addr,
		unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
		u32 ce_ctrl_addr,
		unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32((ar),
					   (ce_ctrl_addr) + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
			   CE_CTRL1_DMAX_LENGTH_SET(n));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
		u32 ce_ctrl_addr,
		unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
		u32 ce_ctrl_addr,
		unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
		u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
		u32 ce_ctrl_addr,
		u32 addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
		u32 ce_ctrl_addr,
		unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
		u32 ce_ctrl_addr,
		unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_HIGH_MASK) |
			   SRC_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
		u32 ce_ctrl_addr,
		unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_LOW_MASK) |
			   SRC_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
		u32 ce_ctrl_addr,
		unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_HIGH_MASK) |
			   DST_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
		u32 ce_ctrl_addr,
		unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_LOW_MASK) |
			   DST_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
		u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
		u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
		u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~CE_WATERMARK_MASK);
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
		u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr | CE_ERROR_MASK);
}

static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
		u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr & ~CE_ERROR_MASK);
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
		u32 ce_ctrl_addr,
		unsigned int mask)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}

/*
 * Guts of ath10k_ce_send, used by both ath10k_ce_send and
 * ath10k_ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
		void *per_transfer_context,
		u32 buffer,
		unsigned int nbytes,
		unsigned int transfer_id,
		unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, *sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);
	sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc->addr = __cpu_to_le32(buffer);
	sdesc->nbytes = __cpu_to_le16(nbytes);
	sdesc->flags = __cpu_to_le16(desc_flags);

	*desc = *sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* WORKAROUND */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	ath10k_pci_sleep(ar);
	return ret;
}

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

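/*
 * Return the number of source-ring descriptors currently available for
 * new sends on this pipe. Taken under ce_lock so the snapshot of
 * write_index and sw_index is consistent.
 */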
int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int delta;

	spin_lock_bh(&ar_pci->ce_lock);
	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
			      pipe->src_ring->write_index,
			      pipe->src_ring->sw_index - 1);
	spin_unlock_bh(&ar_pci->ce_lock);

	return delta;
}

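/*
 * Post a receive buffer to the destination ring: fill in the next
 * destination descriptor, remember the caller's per-transfer context
 * and advance the ring's write index. Returns -EIO if the ring is full.
 */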
int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
			       void *per_recv_context,
			       u32 buffer)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	ret = ath10k_pci_wake(ar);
	if (ret)
		goto out;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);

		/* Update destination descriptor */
		desc->addr = __cpu_to_le32(buffer);
		desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
							per_recv_context;

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		ret = 0;
	} else {
		ret = -EIO;
	}
	ath10k_pci_sleep(ar);

out:
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
		void **per_transfer_contextp,
		u32 *bufferp,
		unsigned int *nbytesp,
		unsigned int *transfer_idp,
		unsigned int *flagsp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = __le32_to_cpu(sdesc.addr);
	*nbytesp = nbytes;
	*transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);

	if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
		*flagsp = CE_RECV_FLAG_SWAPPED;
	else
		*flagsp = 0;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* sanity */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}

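/* Locked wrapper: takes ce_lock and calls the _nolock variant above. */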
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp, flagsp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

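/*
 * Reclaim a receive buffer that was posted but has not completed:
 * hand the buffer and its per-transfer context back to the caller and
 * advance sw_index past it. Returns -EIO when no buffers are
 * outstanding on the destination ring.
 */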
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
		void **per_transfer_contextp,
		u32 *bufferp,
		unsigned int *nbytesp,
		unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	struct ce_desc *sdesc, *sbase;
	unsigned int read_index;
	int ret;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		ret = ath10k_pci_wake(ar);
		if (ret)
			return ret;

		src_ring->hw_index =
			ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		src_ring->hw_index &= nentries_mask;

		ath10k_pci_sleep(ar);
	}

	read_index = src_ring->hw_index;

	if ((read_index == sw_index) || (read_index == 0xffffffff))
		return -EIO;

	sbase = src_ring->shadow_base;
	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);

	/* Return data from completed source descriptor */
	*bufferp = __le32_to_cpu(sdesc->addr);
	*nbytesp = __le16_to_cpu(sdesc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
			   CE_DESC_FLAGS_META_DATA);

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct ce_desc *base = src_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = __le32_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
				   CE_DESC_FLAGS_META_DATA);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* sanity */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ce_state->ctrl_addr;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	spin_lock_bh(&ar_pci->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  HOST_IS_COPY_COMPLETE_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ar_pci->ce_lock);

	/*
	 * Misc CE interrupts are not being handled, but still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);

	ath10k_pci_sleep(ar);
}

/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	int ce_id, ret;
	u32 intr_summary;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(ar);

	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}

	ath10k_pci_sleep(ar);
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
						int disable_copy_compl_intr)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);

	ath10k_pci_sleep(ar);
}

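/*
 * Mask the copy-complete, error and watermark interrupts on every copy
 * engine. The chip is woken for the register writes and put back to
 * sleep before returning.
 */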
int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	int ce_id, ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		u32 ctrl_addr = ath10k_ce_base_address(ce_id);

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
		ath10k_ce_error_intr_disable(ar, ctrl_addr);
		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
	}

	ath10k_pci_sleep(ar);

	return 0;
}

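/*
 * Register send/recv completion callbacks for a pipe. Both helpers take
 * ce_lock and then re-adjust the copy-complete interrupt mask via
 * ath10k_ce_per_engine_handler_adjust().
 */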
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*send_cb)(struct ath10k_ce_pipe *),
				int disable_interrupts)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->send_cb = send_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
	spin_unlock_bh(&ar_pci->ce_lock);
}

void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*recv_cb)(struct ath10k_ce_pipe *))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->recv_cb = recv_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, 0);
	spin_unlock_bh(&ar_pci->ce_lock);
}

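/*
 * Allocate and program the source ring for a copy engine: the ring
 * state and per_transfer_context array come from one kzalloc, the
 * descriptors live in DMA-coherent memory, and a kmalloc'ed shadow
 * copy of the descriptors is kept for faster CPU access.
 */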
static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   struct ath10k_ce_pipe *ce_state,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries = attr->src_nentries;
	unsigned int ce_nbytes;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	dma_addr_t base_addr;
	char *ptr;

	nentries = roundup_pow_of_two(nentries);

	if (ce_state->src_ring) {
		WARN_ON(ce_state->src_ring->nentries != nentries);
		return 0;
	}

	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
	if (ptr == NULL)
		return -ENOMEM;

	ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
	src_ring = ce_state->src_ring;

	ptr += sizeof(struct ath10k_ce_ring);
	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context = (void **)ptr;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		pci_alloc_consistent(ar_pci->pdev,
				     (nentries * sizeof(struct ce_desc) +
				      CE_DESC_RING_ALIGN),
				     &base_addr);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(ce_state->src_ring);
		ce_state->src_ring = NULL;
		return -ENOMEM;
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
			src_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space = ALIGN(
			src_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	/*
	 * Also allocate a shadow src ring in regular
	 * mem to use for faster access.
	 */
	src_ring->shadow_base_unaligned =
		kmalloc((nentries * sizeof(struct ce_desc) +
			 CE_DESC_RING_ALIGN), GFP_KERNEL);
	if (!src_ring->shadow_base_unaligned) {
		pci_free_consistent(ar_pci->pdev,
				    (nentries * sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    src_ring->base_addr_owner_space,
				    src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
		ce_state->src_ring = NULL;
		return -ENOMEM;
	}

	src_ring->shadow_base = PTR_ALIGN(
			src_ring->shadow_base_unaligned,
			CE_DESC_RING_ALIGN);

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot ce src ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}

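/*
 * Destination-ring counterpart of ath10k_ce_init_src_ring(). No shadow
 * ring is kept here; the descriptor memory is zeroed before the ring is
 * handed to the hardware.
 */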
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    struct ath10k_ce_pipe *ce_state,
				    const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries = attr->dest_nentries;
	unsigned int ce_nbytes;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	dma_addr_t base_addr;
	char *ptr;

	nentries = roundup_pow_of_two(nentries);

	if (ce_state->dest_ring) {
		WARN_ON(ce_state->dest_ring->nentries != nentries);
		return 0;
	}

	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
	if (ptr == NULL)
		return -ENOMEM;

	ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
	dest_ring = ce_state->dest_ring;

	ptr += sizeof(struct ath10k_ce_ring);
	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	dest_ring->per_transfer_context = (void **)ptr;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		pci_alloc_consistent(ar_pci->pdev,
				     (nentries * sizeof(struct ce_desc) +
				      CE_DESC_RING_ALIGN),
				     &base_addr);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(ce_state->dest_ring);
		ce_state->dest_ring = NULL;
		return -ENOMEM;
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	/*
	 * Zero the descriptor memory so the target does not see stale
	 * garbage data during firmware download.
	 */
	memset(dest_ring->base_addr_owner_space_unaligned, 0,
	       nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);

	dest_ring->base_addr_owner_space = PTR_ALIGN(
			dest_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space = ALIGN(
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

	return 0;
}

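/*
 * Fill in the software state for one copy engine pipe: its id, control
 * register base, attribute flags and maximum source buffer size.
 */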
static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
						   unsigned int ce_id,
						   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ctrl_addr;
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;
	spin_unlock_bh(&ar_pci->ce_lock);

	return ce_state;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
				      unsigned int ce_id,
				      const struct ce_attr *attr)
{
	struct ath10k_ce_pipe *ce_state;
	int ret;

	/*
	 * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
	 * additional TX locking checks.
	 *
	 * For the lack of a better place do the check here.
	 */
	BUILD_BUG_ON(TARGET_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(TARGET_10X_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

	ret = ath10k_pci_wake(ar);
	if (ret)
		return NULL;

	ce_state = ath10k_ce_init_state(ar, ce_id, attr);
	if (!ce_state) {
		ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
		goto out;
	}

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			ath10k_ce_deinit(ce_state);
			ce_state = NULL;
			goto out;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			ath10k_ce_deinit(ce_state);
			ce_state = NULL;
			goto out;
		}
	}

out:
	ath10k_pci_sleep(ar);
	return ce_state;
}

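/*
 * Release the ring memory allocated by ath10k_ce_init(): the shadow
 * source ring, the DMA-coherent descriptor rings and the ring
 * bookkeeping structures.
 */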
void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ce_state->src_ring) {
		kfree(ce_state->src_ring->shadow_base_unaligned);
		pci_free_consistent(ar_pci->pdev,
				    (ce_state->src_ring->nentries *
				     sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    ce_state->src_ring->base_addr_owner_space,
				    ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		pci_free_consistent(ar_pci->pdev,
				    (ce_state->dest_ring->nentries *
				     sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    ce_state->dest_ring->base_addr_owner_space,
				    ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}