/* drivers/dma/mcf-edma.c — Freescale ColdFire eDMA controller driver */
  1. // SPDX-License-Identifier: GPL-2.0+
  2. //
  3. // Copyright (c) 2013-2014 Freescale Semiconductor, Inc
  4. // Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
  5. #include <linux/module.h>
  6. #include <linux/interrupt.h>
  7. #include <linux/dmaengine.h>
  8. #include <linux/platform_device.h>
  9. #include <linux/platform_data/dma-mcf-edma.h>
  10. #include "fsl-edma-common.h"
  11. #define EDMA_CHANNELS 64
  12. #define EDMA_MASK_CH(x) ((x) & GENMASK(5, 0))
  13. static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
  14. {
  15. struct fsl_edma_engine *mcf_edma = dev_id;
  16. struct edma_regs *regs = &mcf_edma->regs;
  17. unsigned int ch;
  18. struct fsl_edma_chan *mcf_chan;
  19. u64 intmap;
  20. intmap = ioread32(regs->inth);
  21. intmap <<= 32;
  22. intmap |= ioread32(regs->intl);
  23. if (!intmap)
  24. return IRQ_NONE;
  25. for (ch = 0; ch < mcf_edma->n_chans; ch++) {
  26. if (intmap & BIT(ch)) {
  27. iowrite8(EDMA_MASK_CH(ch), regs->cint);
  28. mcf_chan = &mcf_edma->chans[ch];
  29. spin_lock(&mcf_chan->vchan.lock);
  30. if (!mcf_chan->edesc->iscyclic) {
  31. list_del(&mcf_chan->edesc->vdesc.node);
  32. vchan_cookie_complete(&mcf_chan->edesc->vdesc);
  33. mcf_chan->edesc = NULL;
  34. mcf_chan->status = DMA_COMPLETE;
  35. mcf_chan->idle = true;
  36. } else {
  37. vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
  38. }
  39. if (!mcf_chan->edesc)
  40. fsl_edma_xfer_desc(mcf_chan);
  41. spin_unlock(&mcf_chan->vchan.lock);
  42. }
  43. }
  44. return IRQ_HANDLED;
  45. }
  46. static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
  47. {
  48. struct fsl_edma_engine *mcf_edma = dev_id;
  49. struct edma_regs *regs = &mcf_edma->regs;
  50. unsigned int err, ch;
  51. err = ioread32(regs->errl);
  52. if (!err)
  53. return IRQ_NONE;
  54. for (ch = 0; ch < (EDMA_CHANNELS / 2); ch++) {
  55. if (err & BIT(ch)) {
  56. fsl_edma_disable_request(&mcf_edma->chans[ch]);
  57. iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
  58. mcf_edma->chans[ch].status = DMA_ERROR;
  59. mcf_edma->chans[ch].idle = true;
  60. }
  61. }
  62. err = ioread32(regs->errh);
  63. if (!err)
  64. return IRQ_NONE;
  65. for (ch = (EDMA_CHANNELS / 2); ch < EDMA_CHANNELS; ch++) {
  66. if (err & (BIT(ch - (EDMA_CHANNELS / 2)))) {
  67. fsl_edma_disable_request(&mcf_edma->chans[ch]);
  68. iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
  69. mcf_edma->chans[ch].status = DMA_ERROR;
  70. mcf_edma->chans[ch].idle = true;
  71. }
  72. }
  73. return IRQ_HANDLED;
  74. }
  75. static int mcf_edma_irq_init(struct platform_device *pdev,
  76. struct fsl_edma_engine *mcf_edma)
  77. {
  78. int ret = 0, i;
  79. struct resource *res;
  80. res = platform_get_resource_byname(pdev,
  81. IORESOURCE_IRQ, "edma-tx-00-15");
  82. if (!res)
  83. return -1;
  84. for (ret = 0, i = res->start; i <= res->end; ++i)
  85. ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
  86. if (ret)
  87. return ret;
  88. res = platform_get_resource_byname(pdev,
  89. IORESOURCE_IRQ, "edma-tx-16-55");
  90. if (!res)
  91. return -1;
  92. for (ret = 0, i = res->start; i <= res->end; ++i)
  93. ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
  94. if (ret)
  95. return ret;
  96. ret = platform_get_irq_byname(pdev, "edma-tx-56-63");
  97. if (ret != -ENXIO) {
  98. ret = request_irq(ret, mcf_edma_tx_handler,
  99. 0, "eDMA", mcf_edma);
  100. if (ret)
  101. return ret;
  102. }
  103. ret = platform_get_irq_byname(pdev, "edma-err");
  104. if (ret != -ENXIO) {
  105. ret = request_irq(ret, mcf_edma_err_handler,
  106. 0, "eDMA", mcf_edma);
  107. if (ret)
  108. return ret;
  109. }
  110. return 0;
  111. }
  112. static void mcf_edma_irq_free(struct platform_device *pdev,
  113. struct fsl_edma_engine *mcf_edma)
  114. {
  115. int irq;
  116. struct resource *res;
  117. res = platform_get_resource_byname(pdev,
  118. IORESOURCE_IRQ, "edma-tx-00-15");
  119. if (res) {
  120. for (irq = res->start; irq <= res->end; irq++)
  121. free_irq(irq, mcf_edma);
  122. }
  123. res = platform_get_resource_byname(pdev,
  124. IORESOURCE_IRQ, "edma-tx-16-55");
  125. if (res) {
  126. for (irq = res->start; irq <= res->end; irq++)
  127. free_irq(irq, mcf_edma);
  128. }
  129. irq = platform_get_irq_byname(pdev, "edma-tx-56-63");
  130. if (irq != -ENXIO)
  131. free_irq(irq, mcf_edma);
  132. irq = platform_get_irq_byname(pdev, "edma-err");
  133. if (irq != -ENXIO)
  134. free_irq(irq, mcf_edma);
  135. }
  136. static int mcf_edma_probe(struct platform_device *pdev)
  137. {
  138. struct mcf_edma_platform_data *pdata;
  139. struct fsl_edma_engine *mcf_edma;
  140. struct fsl_edma_chan *mcf_chan;
  141. struct edma_regs *regs;
  142. struct resource *res;
  143. int ret, i, len, chans;
  144. pdata = dev_get_platdata(&pdev->dev);
  145. if (!pdata) {
  146. dev_err(&pdev->dev, "no platform data supplied\n");
  147. return -EINVAL;
  148. }
  149. chans = pdata->dma_channels;
  150. len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
  151. mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
  152. if (!mcf_edma)
  153. return -ENOMEM;
  154. mcf_edma->n_chans = chans;
  155. /* Set up version for ColdFire edma */
  156. mcf_edma->version = v2;
  157. mcf_edma->big_endian = 1;
  158. if (!mcf_edma->n_chans) {
  159. dev_info(&pdev->dev, "setting default channel number to 64");
  160. mcf_edma->n_chans = 64;
  161. }
  162. mutex_init(&mcf_edma->fsl_edma_mutex);
  163. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  164. mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
  165. if (IS_ERR(mcf_edma->membase))
  166. return PTR_ERR(mcf_edma->membase);
  167. fsl_edma_setup_regs(mcf_edma);
  168. regs = &mcf_edma->regs;
  169. INIT_LIST_HEAD(&mcf_edma->dma_dev.channels);
  170. for (i = 0; i < mcf_edma->n_chans; i++) {
  171. struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];
  172. mcf_chan->edma = mcf_edma;
  173. mcf_chan->slave_id = i;
  174. mcf_chan->idle = true;
  175. mcf_chan->vchan.desc_free = fsl_edma_free_desc;
  176. vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
  177. iowrite32(0x0, &regs->tcd[i].csr);
  178. }
  179. iowrite32(~0, regs->inth);
  180. iowrite32(~0, regs->intl);
  181. ret = mcf_edma_irq_init(pdev, mcf_edma);
  182. if (ret)
  183. return ret;
  184. dma_cap_set(DMA_PRIVATE, mcf_edma->dma_dev.cap_mask);
  185. dma_cap_set(DMA_SLAVE, mcf_edma->dma_dev.cap_mask);
  186. dma_cap_set(DMA_CYCLIC, mcf_edma->dma_dev.cap_mask);
  187. mcf_edma->dma_dev.dev = &pdev->dev;
  188. mcf_edma->dma_dev.device_alloc_chan_resources =
  189. fsl_edma_alloc_chan_resources;
  190. mcf_edma->dma_dev.device_free_chan_resources =
  191. fsl_edma_free_chan_resources;
  192. mcf_edma->dma_dev.device_config = fsl_edma_slave_config;
  193. mcf_edma->dma_dev.device_prep_dma_cyclic =
  194. fsl_edma_prep_dma_cyclic;
  195. mcf_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
  196. mcf_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
  197. mcf_edma->dma_dev.device_pause = fsl_edma_pause;
  198. mcf_edma->dma_dev.device_resume = fsl_edma_resume;
  199. mcf_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
  200. mcf_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
  201. mcf_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
  202. mcf_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
  203. mcf_edma->dma_dev.directions =
  204. BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
  205. mcf_edma->dma_dev.filter.fn = mcf_edma_filter_fn;
  206. mcf_edma->dma_dev.filter.map = pdata->slave_map;
  207. mcf_edma->dma_dev.filter.mapcnt = pdata->slavecnt;
  208. platform_set_drvdata(pdev, mcf_edma);
  209. ret = dma_async_device_register(&mcf_edma->dma_dev);
  210. if (ret) {
  211. dev_err(&pdev->dev,
  212. "Can't register Freescale eDMA engine. (%d)\n", ret);
  213. return ret;
  214. }
  215. /* Enable round robin arbitration */
  216. iowrite32(EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
  217. return 0;
  218. }
  219. static int mcf_edma_remove(struct platform_device *pdev)
  220. {
  221. struct fsl_edma_engine *mcf_edma = platform_get_drvdata(pdev);
  222. mcf_edma_irq_free(pdev, mcf_edma);
  223. fsl_edma_cleanup_vchan(&mcf_edma->dma_dev);
  224. dma_async_device_unregister(&mcf_edma->dma_dev);
  225. return 0;
  226. }
  227. static struct platform_driver mcf_edma_driver = {
  228. .driver = {
  229. .name = "mcf-edma",
  230. },
  231. .probe = mcf_edma_probe,
  232. .remove = mcf_edma_remove,
  233. };
  234. bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
  235. {
  236. if (chan->device->dev->driver == &mcf_edma_driver.driver) {
  237. struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);
  238. return (mcf_chan->slave_id == (uintptr_t)param);
  239. }
  240. return false;
  241. }
  242. EXPORT_SYMBOL(mcf_edma_filter_fn);
  243. static int __init mcf_edma_init(void)
  244. {
  245. return platform_driver_register(&mcf_edma_driver);
  246. }
  247. subsys_initcall(mcf_edma_init);
  248. static void __exit mcf_edma_exit(void)
  249. {
  250. platform_driver_unregister(&mcf_edma_driver);
  251. }
  252. module_exit(mcf_edma_exit);
  253. MODULE_ALIAS("platform:mcf-edma");
  254. MODULE_DESCRIPTION("Freescale eDMA engine driver, ColdFire family");
  255. MODULE_LICENSE("GPL v2");