fsl-edma-common.c

// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "fsl-edma-common.h"

#define EDMA_CR                 0x00
#define EDMA_ES                 0x04
#define EDMA_ERQ                0x0C
#define EDMA_EEI                0x14
#define EDMA_SERQ               0x1B
#define EDMA_CERQ               0x1A
#define EDMA_SEEI               0x19
#define EDMA_CEEI               0x18
#define EDMA_CINT               0x1F
#define EDMA_CERR               0x1E
#define EDMA_SSRT               0x1D
#define EDMA_CDNE               0x1C
#define EDMA_INTR               0x24
#define EDMA_ERR                0x2C

#define EDMA64_ERQH             0x08
#define EDMA64_EEIH             0x10
#define EDMA64_SERQ             0x18
#define EDMA64_CERQ             0x19
#define EDMA64_SEEI             0x1a
#define EDMA64_CEEI             0x1b
#define EDMA64_CINT             0x1c
#define EDMA64_CERR             0x1d
#define EDMA64_SSRT             0x1e
#define EDMA64_CDNE             0x1f
#define EDMA64_INTH             0x20
#define EDMA64_INTL             0x24
#define EDMA64_ERRH             0x28
#define EDMA64_ERRL             0x2c

#define EDMA_TCD                0x1000
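/*
 * Note on the layout above: the v1 byte-wide set/clear registers
 * (EDMA_SERQ..EDMA_CEEI, EDMA_CINT..EDMA_CDNE) sit at byte-swapped
 * offsets within their 32-bit words relative to the v2 EDMA64_*
 * layout, consistent with the two implementations' differing register
 * endianness (see the v1/v2 comment above fsl_edma_setup_regs() at the
 * end of this file).
 */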
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
        edma_writeb(fsl_chan->edma, ch, regs->serq);
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        edma_writeb(fsl_chan->edma, ch, regs->cerq);
        edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);

void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
                       unsigned int slot, bool enable)
{
        u32 ch = fsl_chan->vchan.chan.chan_id;
        void __iomem *muxaddr;
        unsigned int chans_per_mux, ch_off;

        chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
        ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
        muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
        slot = EDMAMUX_CHCFG_SOURCE(slot);

        if (enable)
                iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
        else
                iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
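/*
 * Worked example of the mux arithmetic above, assuming a 32-channel
 * eDMA with DMAMUX_NR == 2 (as on Vybrid-class parts): chans_per_mux
 * is 32 / 2 = 16, so channel 17 routes through muxbase[17 / 16] ==
 * muxbase[1], at byte offset 17 % 16 == 1 within that mux.
 */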
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
        switch (addr_width) {
        case 1:
                return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
        case 2:
                return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
        case 4:
                return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
        case 8:
                return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
        default:
                return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
        }
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
        struct fsl_edma_desc *fsl_desc;
        int i;

        fsl_desc = to_fsl_edma_desc(vdesc);
        for (i = 0; i < fsl_desc->n_tcds; i++)
                dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
                              fsl_desc->tcd[i].ptcd);
        kfree(fsl_desc);
}
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);

int fsl_edma_terminate_all(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        fsl_edma_disable_request(fsl_chan);
        fsl_chan->edesc = NULL;
        fsl_chan->idle = true;
        vchan_get_all_descriptors(&fsl_chan->vchan, &head);
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);

int fsl_edma_pause(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        if (fsl_chan->edesc) {
                fsl_edma_disable_request(fsl_chan);
                fsl_chan->status = DMA_PAUSED;
                fsl_chan->idle = true;
        }
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_pause);

int fsl_edma_resume(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        if (fsl_chan->edesc) {
                fsl_edma_enable_request(fsl_chan);
                fsl_chan->status = DMA_IN_PROGRESS;
                fsl_chan->idle = false;
        }
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);

int fsl_edma_slave_config(struct dma_chan *chan,
                          struct dma_slave_config *cfg)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

        fsl_chan->fsc.dir = cfg->direction;
        if (cfg->direction == DMA_DEV_TO_MEM) {
                fsl_chan->fsc.dev_addr = cfg->src_addr;
                fsl_chan->fsc.addr_width = cfg->src_addr_width;
                fsl_chan->fsc.burst = cfg->src_maxburst;
                fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
        } else if (cfg->direction == DMA_MEM_TO_DEV) {
                fsl_chan->fsc.dev_addr = cfg->dst_addr;
                fsl_chan->fsc.addr_width = cfg->dst_addr_width;
                fsl_chan->fsc.burst = cfg->dst_maxburst;
                fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
        } else {
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
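/*
 * A minimal consumer-side sketch (not part of this driver; the device
 * address below is a made-up placeholder) showing how a client reaches
 * fsl_edma_slave_config() through the generic dmaengine API:
 *
 *      struct dma_slave_config cfg = {
 *              .direction      = DMA_DEV_TO_MEM,
 *              .src_addr       = fifo_phys_addr,   // hypothetical
 *              .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *              .src_maxburst   = 1,
 *      };
 *
 *      ret = dmaengine_slave_config(chan, &cfg);
 */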
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
                                    struct virt_dma_desc *vdesc,
                                    bool in_progress)
{
        struct fsl_edma_desc *edesc = fsl_chan->edesc;
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;
        enum dma_transfer_direction dir = fsl_chan->fsc.dir;
        dma_addr_t cur_addr, dma_addr;
        size_t len, size;
        int i;

        /* calculate the total size in this desc */
        for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
                len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes) *
                        le16_to_cpu(edesc->tcd[i].vtcd->biter);

        if (!in_progress)
                return len;

        if (dir == DMA_MEM_TO_DEV)
                cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
        else
                cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);

        /* figure out which TCDs have finished and compute the residue */
        for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
                size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes) *
                        le16_to_cpu(edesc->tcd[i].vtcd->biter);
                if (dir == DMA_MEM_TO_DEV)
                        dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
                else
                        dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

                len -= size;
                if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
                        len += dma_addr + size - cur_addr;
                        break;
                }
        }

        return len;
}
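/*
 * Worked example of the residue logic above: with three TCDs of 1024
 * bytes each (len starts at 3072) and the engine 512 bytes into the
 * second TCD, the loop subtracts 1024 for TCD 0 (len = 2048), subtracts
 * 1024 for TCD 1 (len = 1024), then adds back the 512 bytes TCD 1 has
 * not yet moved, giving a residue of 1536 bytes.
 */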
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
                                   dma_cookie_t cookie,
                                   struct dma_tx_state *txstate)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        unsigned long flags;

        status = dma_cookie_status(chan, cookie, txstate);
        if (status == DMA_COMPLETE)
                return status;

        if (!txstate)
                return fsl_chan->status;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
        if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
                txstate->residue =
                        fsl_edma_desc_residue(fsl_chan, vdesc, true);
        else if (vdesc)
                txstate->residue =
                        fsl_edma_desc_residue(fsl_chan, vdesc, false);
        else
                txstate->residue = 0;
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

        return fsl_chan->status;
}
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);

static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
                                  struct fsl_edma_hw_tcd *tcd)
{
        struct fsl_edma_engine *edma = fsl_chan->edma;
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        /*
         * TCD parameters are stored in struct fsl_edma_hw_tcd in little
         * endian format. However, we need to load the TCD registers in
         * big- or little-endian order, depending on the eDMA engine's
         * register endianness.
         */
        edma_writew(edma, 0, &regs->tcd[ch].csr);
        edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
        edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);
        edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
        edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);
        edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
        edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);
        edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
        edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
        edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);
        edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
                    &regs->tcd[ch].dlast_sga);
        edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
}
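/*
 * A likely rationale for the csr write ordering above (an inference,
 * not stated in this file): zeroing csr first keeps the channel from
 * acting on a stale E_SG/START bit while the remaining TCD fields are
 * still being updated; the real csr value goes in only once the rest
 * of the TCD is in place.
 */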
static inline
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
                       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
                       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
                       bool disable_req, bool enable_sg)
{
        u16 csr = 0;

        /*
         * eDMA hardware SGs require the TCDs to be stored in little
         * endian format irrespective of the register endian model.
         * So we put the values into memory in little endian and let
         * fsl_edma_set_tcd_regs() do the swap when loading the
         * registers.
         */
        tcd->saddr = cpu_to_le32(src);
        tcd->daddr = cpu_to_le32(dst);
        tcd->attr = cpu_to_le16(attr);
        tcd->soff = cpu_to_le16(soff);
        tcd->nbytes = cpu_to_le32(nbytes);
        tcd->slast = cpu_to_le32(slast);
        tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
        tcd->doff = cpu_to_le16(doff);
        tcd->dlast_sga = cpu_to_le32(dlast_sga);
        tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));

        if (major_int)
                csr |= EDMA_TCD_CSR_INT_MAJOR;
        if (disable_req)
                csr |= EDMA_TCD_CSR_D_REQ;
        if (enable_sg)
                csr |= EDMA_TCD_CSR_E_SG;

        tcd->csr = cpu_to_le16(csr);
}

static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
                                                 int sg_len)
{
        struct fsl_edma_desc *fsl_desc;
        int i;

        fsl_desc = kzalloc(sizeof(*fsl_desc) +
                           sizeof(struct fsl_edma_sw_tcd) * sg_len,
                           GFP_NOWAIT);
        if (!fsl_desc)
                return NULL;

        fsl_desc->echan = fsl_chan;
        fsl_desc->n_tcds = sg_len;
        for (i = 0; i < sg_len; i++) {
                fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
                                        GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
                if (!fsl_desc->tcd[i].vtcd)
                        goto err;
        }
        return fsl_desc;

err:
        while (--i >= 0)
                dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
                              fsl_desc->tcd[i].ptcd);
        kfree(fsl_desc);
        return NULL;
}
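/*
 * Side note: the open-coded allocation size in fsl_edma_alloc_desc()
 * could equivalently be written with the overflow-checked helper from
 * <linux/overflow.h>, assuming 'tcd' is the trailing flexible array of
 * struct fsl_edma_sw_tcd (as its use here suggests):
 *
 *      fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
 */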
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction direction,
                unsigned long flags)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct fsl_edma_desc *fsl_desc;
        dma_addr_t dma_buf_next;
        int sg_len, i;
        u32 src_addr, dst_addr, last_sg, nbytes;
        u16 soff, doff, iter;

        if (!is_slave_direction(fsl_chan->fsc.dir))
                return NULL;

        sg_len = buf_len / period_len;

        fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
        if (!fsl_desc)
                return NULL;
        fsl_desc->iscyclic = true;

        dma_buf_next = dma_addr;
        nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
        iter = period_len / nbytes;

        for (i = 0; i < sg_len; i++) {
                if (dma_buf_next >= dma_addr + buf_len)
                        dma_buf_next = dma_addr;

                /* get next sg's physical address */
                last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

                if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
                        src_addr = dma_buf_next;
                        dst_addr = fsl_chan->fsc.dev_addr;
                        soff = fsl_chan->fsc.addr_width;
                        doff = 0;
                } else {
                        src_addr = fsl_chan->fsc.dev_addr;
                        dst_addr = dma_buf_next;
                        soff = 0;
                        doff = fsl_chan->fsc.addr_width;
                }

                fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
                                  fsl_chan->fsc.attr, soff, nbytes, 0, iter,
                                  iter, doff, last_sg, true, false, true);
                dma_buf_next += period_len;
        }

        return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
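/*
 * Consumer-side sketch (not from this driver; sizes, names and the
 * callback are illustrative): cyclic transfers typically arrive here
 * via the generic wrapper:
 *
 *      desc = dmaengine_prep_dma_cyclic(chan, buf_dma, 4096, 1024,
 *                                       DMA_DEV_TO_MEM,
 *                                       DMA_PREP_INTERRUPT);
 *      if (desc) {
 *              desc->callback = my_period_done;    // hypothetical
 *              dmaengine_submit(desc);
 *              dma_async_issue_pending(chan);
 *      }
 */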
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct fsl_edma_desc *fsl_desc;
        struct scatterlist *sg;
        u32 src_addr, dst_addr, last_sg, nbytes;
        u16 soff, doff, iter;
        int i;

        if (!is_slave_direction(fsl_chan->fsc.dir))
                return NULL;

        fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
        if (!fsl_desc)
                return NULL;
        fsl_desc->iscyclic = false;

        nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
        for_each_sg(sgl, sg, sg_len, i) {
                /* get next sg's physical address */
                last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

                if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
                        src_addr = sg_dma_address(sg);
                        dst_addr = fsl_chan->fsc.dev_addr;
                        soff = fsl_chan->fsc.addr_width;
                        doff = 0;
                } else {
                        src_addr = fsl_chan->fsc.dev_addr;
                        dst_addr = sg_dma_address(sg);
                        soff = 0;
                        doff = fsl_chan->fsc.addr_width;
                }

                iter = sg_dma_len(sg) / nbytes;
                if (i < sg_len - 1) {
                        /* not the last TCD: chain to the next one via e_sg */
                        last_sg = fsl_desc->tcd[(i + 1)].ptcd;
                        fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
                                          dst_addr, fsl_chan->fsc.attr, soff,
                                          nbytes, 0, iter, iter, doff, last_sg,
                                          false, false, true);
                } else {
                        /*
                         * last TCD: raise the major-loop interrupt and
                         * disable the hardware request when done
                         */
                        last_sg = 0;
                        fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
                                          dst_addr, fsl_chan->fsc.attr, soff,
                                          nbytes, 0, iter, iter, doff, last_sg,
                                          true, true, false);
                }
        }

        return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
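/*
 * The scatter-gather analogue of the cyclic sketch above (again
 * illustrative, with 'sgl' assumed to be a DMA-mapped scatterlist):
 *
 *      desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *                                     DMA_PREP_INTERRUPT);
 */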
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
        struct virt_dma_desc *vdesc;

        vdesc = vchan_next_desc(&fsl_chan->vchan);
        if (!vdesc)
                return;
        fsl_chan->edesc = to_fsl_edma_desc(vdesc);
        fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
        fsl_edma_enable_request(fsl_chan);
        fsl_chan->status = DMA_IN_PROGRESS;
        fsl_chan->idle = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);

void fsl_edma_issue_pending(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

        if (unlikely(fsl_chan->pm_state != RUNNING)) {
                spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
                /* cannot submit due to suspend */
                return;
        }

        if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
                fsl_edma_xfer_desc(fsl_chan);

        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

        fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
                                             sizeof(struct fsl_edma_hw_tcd),
                                             32, 0);
        /* avoid a NULL pool being dereferenced later in fsl_edma_alloc_desc() */
        if (!fsl_chan->tcd_pool)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);
void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        fsl_edma_disable_request(fsl_chan);
        fsl_edma_chan_mux(fsl_chan, 0, false);
        fsl_chan->edesc = NULL;
        vchan_get_all_descriptors(&fsl_chan->vchan, &head);
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
        dma_pool_destroy(fsl_chan->tcd_pool);
        fsl_chan->tcd_pool = NULL;
}
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
        struct fsl_edma_chan *chan, *_chan;

        list_for_each_entry_safe(chan, _chan,
                                 &dmadev->channels, vchan.chan.device_node) {
                list_del(&chan->vchan.chan.device_node);
                tasklet_kill(&chan->vchan.task);
        }
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
/*
 * On the 32-channel Vybrid/mpc577x eDMA version (here called "v1"),
 * register offsets differ from those of the 64-channel ColdFire
 * mcf5441x eDMA (here called "v2").
 *
 * This function sets up the register offsets according to the declared
 * version, so it must be called from xxx_edma_probe() just after the
 * edma "version" and "membase" fields have been set appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
        edma->regs.cr = edma->membase + EDMA_CR;
        edma->regs.es = edma->membase + EDMA_ES;
        edma->regs.erql = edma->membase + EDMA_ERQ;
        edma->regs.eeil = edma->membase + EDMA_EEI;

        edma->regs.serq = edma->membase + ((edma->version == v1) ?
                        EDMA_SERQ : EDMA64_SERQ);
        edma->regs.cerq = edma->membase + ((edma->version == v1) ?
                        EDMA_CERQ : EDMA64_CERQ);
        edma->regs.seei = edma->membase + ((edma->version == v1) ?
                        EDMA_SEEI : EDMA64_SEEI);
        edma->regs.ceei = edma->membase + ((edma->version == v1) ?
                        EDMA_CEEI : EDMA64_CEEI);
        edma->regs.cint = edma->membase + ((edma->version == v1) ?
                        EDMA_CINT : EDMA64_CINT);
        edma->regs.cerr = edma->membase + ((edma->version == v1) ?
                        EDMA_CERR : EDMA64_CERR);
        edma->regs.ssrt = edma->membase + ((edma->version == v1) ?
                        EDMA_SSRT : EDMA64_SSRT);
        edma->regs.cdne = edma->membase + ((edma->version == v1) ?
                        EDMA_CDNE : EDMA64_CDNE);
        edma->regs.intl = edma->membase + ((edma->version == v1) ?
                        EDMA_INTR : EDMA64_INTL);
        edma->regs.errl = edma->membase + ((edma->version == v1) ?
                        EDMA_ERR : EDMA64_ERRL);

        if (edma->version == v2) {
                edma->regs.erqh = edma->membase + EDMA64_ERQH;
                edma->regs.eeih = edma->membase + EDMA64_EEIH;
                edma->regs.errh = edma->membase + EDMA64_ERRH;
                edma->regs.inth = edma->membase + EDMA64_INTH;
        }

        edma->regs.tcd = edma->membase + EDMA_TCD;
}
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
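/*
 * Probe-time usage sketch for fsl_edma_setup_regs(), following the rule
 * stated above (illustrative only; 'pdev' and 'res' are assumed locals
 * of an xxx_edma_probe() function):
 *
 *      fsl_edma->version = v1;         // or v2 on mcf5441x
 *      fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
 *      if (IS_ERR(fsl_edma->membase))
 *              return PTR_ERR(fsl_edma->membase);
 *
 *      fsl_edma_setup_regs(fsl_edma);
 */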

MODULE_LICENSE("GPL v2");