/*
 * Ingenic JZ4780 DMA controller
 *
 * Copyright (c) 2015 Imagination Technologies
 * Author: Alex Smith <alex@alex-smith.me.uk>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"
#define JZ_DMA_NR_CHANNELS	32

/* Global registers. */
#define JZ_DMA_REG_DMAC		0x1000
#define JZ_DMA_REG_DIRQP	0x1004
#define JZ_DMA_REG_DDR		0x1008
#define JZ_DMA_REG_DDRS		0x100c
#define JZ_DMA_REG_DMACP	0x101c
#define JZ_DMA_REG_DSIRQP	0x1020
#define JZ_DMA_REG_DSIRQM	0x1024
#define JZ_DMA_REG_DCIRQP	0x1028
#define JZ_DMA_REG_DCIRQM	0x102c

/* Per-channel registers. */
#define JZ_DMA_REG_CHAN(n)	((n) * 0x20)
#define JZ_DMA_REG_DSA(n)	(0x00 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DTA(n)	(0x04 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DTC(n)	(0x08 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DRT(n)	(0x0c + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DCS(n)	(0x10 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DCM(n)	(0x14 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DDA(n)	(0x18 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DSD(n)	(0x1c + JZ_DMA_REG_CHAN(n))

#define JZ_DMA_DMAC_DMAE	BIT(0)
#define JZ_DMA_DMAC_AR		BIT(2)
#define JZ_DMA_DMAC_HLT		BIT(3)
#define JZ_DMA_DMAC_FMSC	BIT(31)

#define JZ_DMA_DRT_AUTO		0x8

#define JZ_DMA_DCS_CTE		BIT(0)
#define JZ_DMA_DCS_HLT		BIT(2)
#define JZ_DMA_DCS_TT		BIT(3)
#define JZ_DMA_DCS_AR		BIT(4)
#define JZ_DMA_DCS_DES8		BIT(30)

#define JZ_DMA_DCM_LINK		BIT(0)
#define JZ_DMA_DCM_TIE		BIT(1)
#define JZ_DMA_DCM_STDE		BIT(2)
#define JZ_DMA_DCM_TSZ_SHIFT	8
#define JZ_DMA_DCM_TSZ_MASK	(0x7 << JZ_DMA_DCM_TSZ_SHIFT)
#define JZ_DMA_DCM_DP_SHIFT	12
#define JZ_DMA_DCM_SP_SHIFT	14
#define JZ_DMA_DCM_DAI		BIT(22)
#define JZ_DMA_DCM_SAI		BIT(23)

#define JZ_DMA_SIZE_4_BYTE	0x0
#define JZ_DMA_SIZE_1_BYTE	0x1
#define JZ_DMA_SIZE_2_BYTE	0x2
#define JZ_DMA_SIZE_16_BYTE	0x3
#define JZ_DMA_SIZE_32_BYTE	0x4
#define JZ_DMA_SIZE_64_BYTE	0x5
#define JZ_DMA_SIZE_128_BYTE	0x6

#define JZ_DMA_WIDTH_32_BIT	0x0
#define JZ_DMA_WIDTH_8_BIT	0x1
#define JZ_DMA_WIDTH_16_BIT	0x2

#define JZ_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)	 | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
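/*
 * Note that the DCM transfer size encoding is not monotonic: 0x0 selects
 * 4-byte units while 0x1 and 0x2 select 1- and 2-byte units, and there is
 * no encoding for 8-byte units. jz4780_dma_transfer_size() below maps a
 * power-of-two unit size onto these values.
 */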
/**
 * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
 * @dcm: value for the DCM (channel command) register
 * @dsa: source address
 * @dta: target address
 * @dtc: transfer count (number of blocks of the transfer size specified in DCM
 * to transfer) in the low 24 bits, offset of the next descriptor from the
 * descriptor base address in the upper 8 bits.
 * @sd: target/source stride difference (in stride transfer mode).
 * @drt: request type
 */
struct jz4780_dma_hwdesc {
	uint32_t dcm;
	uint32_t dsa;
	uint32_t dta;
	uint32_t dtc;
	uint32_t sd;
	uint32_t drt;
	uint32_t reserved[2];
};
/* Size of allocations for hardware descriptor blocks. */
#define JZ_DMA_DESC_BLOCK_SIZE	PAGE_SIZE
#define JZ_DMA_MAX_DESC		\
	(JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc))
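/*
 * Each hwdesc is 8 words (32 bytes), so with the typical 4 KiB PAGE_SIZE a
 * descriptor block holds 4096 / 32 = 128 hardware descriptors, which bounds
 * the number of scatterlist entries or periods in a single transaction.
 */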
struct jz4780_dma_desc {
	struct virt_dma_desc vdesc;

	struct jz4780_dma_hwdesc *desc;
	dma_addr_t desc_phys;
	unsigned int count;
	enum dma_transaction_type type;
	uint32_t status;
};

struct jz4780_dma_chan {
	struct virt_dma_chan vchan;
	unsigned int id;
	struct dma_pool *desc_pool;

	uint32_t transfer_type;
	uint32_t transfer_shift;
	struct dma_slave_config config;

	struct jz4780_dma_desc *desc;
	unsigned int curr_hwdesc;
};

struct jz4780_dma_dev {
	struct dma_device dma_device;
	void __iomem *base;
	struct clk *clk;
	unsigned int irq;

	uint32_t chan_reserved;
	struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS];
};

struct jz4780_dma_data {
	uint32_t transfer_type;
	int channel;
};
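/*
 * In struct jz4780_dma_data, a channel of -1 means "allocate any unreserved
 * channel", while a non-negative value requests that exact channel; see
 * jz4780_dma_filter_fn() and jz4780_of_dma_xlate() below.
 */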
static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct jz4780_dma_chan, vchan.chan);
}

static inline struct jz4780_dma_desc *to_jz4780_dma_desc(
	struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct jz4780_dma_desc, vdesc);
}

static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
	struct jz4780_dma_chan *jzchan)
{
	return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,
			    dma_device);
}

static inline uint32_t jz4780_dma_readl(struct jz4780_dma_dev *jzdma,
	unsigned int reg)
{
	return readl(jzdma->base + reg);
}

static inline void jz4780_dma_writel(struct jz4780_dma_dev *jzdma,
	unsigned int reg, uint32_t val)
{
	writel(val, jzdma->base + reg);
}
static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
	struct jz4780_dma_chan *jzchan, unsigned int count,
	enum dma_transaction_type type)
{
	struct jz4780_dma_desc *desc;

	if (count > JZ_DMA_MAX_DESC)
		return NULL;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT,
				    &desc->desc_phys);
	if (!desc->desc) {
		kfree(desc);
		return NULL;
	}

	desc->count = count;
	desc->type = type;
	return desc;
}
static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc);
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan);

	dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
	kfree(desc);
}
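/*
 * Map the largest power-of-two unit that divides @val onto the DCM transfer
 * size encoding, storing log2 of that unit in @ord. For example, if the OR
 * of address, length and (width * maxburst) is 0x30, the lowest set bit is
 * bit 4, so 16-byte units are selected and *ord is set to 4.
 */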
static int jz4780_dma_transfer_size(unsigned long val, int *ord)
{
	*ord = ffs(val) - 1;

	switch (*ord) {
	case 0:
		return JZ_DMA_SIZE_1_BYTE;
	case 1:
		return JZ_DMA_SIZE_2_BYTE;
	case 2:
		return JZ_DMA_SIZE_4_BYTE;
	case 4:
		return JZ_DMA_SIZE_16_BYTE;
	case 5:
		return JZ_DMA_SIZE_32_BYTE;
	case 6:
		return JZ_DMA_SIZE_64_BYTE;
	case 7:
		return JZ_DMA_SIZE_128_BYTE;
	default:
		/* No encoding for 8-byte (ord == 3) or >128-byte units. */
		return -EINVAL;
	}
}
static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
	enum dma_transfer_direction direction)
{
	struct dma_slave_config *config = &jzchan->config;
	uint32_t width, maxburst;
	int tsz, ord;

	if (direction == DMA_MEM_TO_DEV) {
		desc->dcm = JZ_DMA_DCM_SAI;
		desc->dsa = addr;
		desc->dta = config->dst_addr;
		desc->drt = jzchan->transfer_type;

		width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else {
		desc->dcm = JZ_DMA_DCM_DAI;
		desc->dsa = config->src_addr;
		desc->dta = addr;
		desc->drt = jzchan->transfer_type;

		width = config->src_addr_width;
		maxburst = config->src_maxburst;
	}

	/*
	 * This calculates the maximum transfer size that can be used with the
	 * given address, length, width and maximum burst size. The address
	 * must be aligned to the transfer size, the total length must be
	 * divisible by the transfer size, and we must not use more than the
	 * maximum burst specified by the user.
	 */
	tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst), &ord);
	if (tsz < 0)
		return tsz;

	jzchan->transfer_shift = ord;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		width = JZ_DMA_WIDTH_32_BIT;
		break;
	default:
		return -EINVAL;
	}

	desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;

	desc->dtc = len >> ord;

	return 0;
}
static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	unsigned int i;
	int err;

	desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
	if (!desc)
		return NULL;

	for (i = 0; i < sg_len; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
					      sg_dma_address(&sgl[i]),
					      sg_dma_len(&sgl[i]),
					      direction);
		if (err < 0) {
			dma_pool_free(jzchan->desc_pool, desc->desc,
				      desc->desc_phys);
			kfree(desc);
			return ERR_PTR(err);
		}

		desc->desc[i].dcm |= JZ_DMA_DCM_TIE;

		if (i != (sg_len - 1)) {
			/* Automatically proceed to the next descriptor. */
			desc->desc[i].dcm |= JZ_DMA_DCM_LINK;

			/*
			 * The upper 8 bits of the DTC field in the descriptor
			 * must be set to (offset from descriptor base of next
			 * descriptor >> 4).
			 */
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}
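/*
 * Worked example of the chaining arithmetic above: each hwdesc is 32 bytes,
 * so descriptor i + 1 starts at offset (i + 1) * 32 from the descriptor
 * base. The hardware expects that offset divided by 16 in DTC[31:24], i.e.
 * ((i + 1) * 32) >> 4 = (i + 1) * 2.
 */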
static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	unsigned int periods, i;
	int err;

	if (buf_len % period_len)
		return NULL;

	periods = buf_len / period_len;

	desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
	if (!desc)
		return NULL;

	for (i = 0; i < periods; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
					      period_len, direction);
		if (err < 0) {
			dma_pool_free(jzchan->desc_pool, desc->desc,
				      desc->desc_phys);
			kfree(desc);
			return ERR_PTR(err);
		}

		buf_addr += period_len;

		/*
		 * Set the link bit to indicate that the controller should
		 * automatically proceed to the next descriptor. In
		 * jz4780_dma_begin(), this will be cleared if we need to issue
		 * an interrupt after each period.
		 */
		desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK;

		/*
		 * The upper 8 bits of the DTC field in the descriptor must be
		 * set to (offset from descriptor base of next descriptor >> 4).
		 * If this is the last descriptor, link it back to the first,
		 * i.e. leave the offset set to 0, otherwise point to the next
		 * one.
		 */
		if (i != (periods - 1)) {
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}
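/*
 * For example, a 64 KiB cyclic buffer with 16 KiB periods builds four
 * hwdescs; the first three chain forward, while the last one's DTC offset
 * field stays 0 so the controller wraps back to the first descriptor and the
 * transfer repeats until terminated.
 */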
static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	int tsz, ord;

	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
	if (!desc)
		return NULL;

	tsz = jz4780_dma_transfer_size(dest | src | len, &ord);
	if (tsz < 0) {
		dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
		kfree(desc);
		return ERR_PTR(tsz);
	}

	desc->desc[0].dsa = src;
	desc->desc[0].dta = dest;
	desc->desc[0].drt = JZ_DMA_DRT_AUTO;
	desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
			    tsz << JZ_DMA_DCM_TSZ_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
	desc->desc[0].dtc = len >> ord;

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}
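/*
 * JZ_DMA_DRT_AUTO selects the auto-request transfer type, so the
 * memory-to-memory copy above proceeds without waiting on a peripheral DMA
 * request line.
 */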
static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct virt_dma_desc *vdesc;
	unsigned int i;
	dma_addr_t desc_phys;

	if (!jzchan->desc) {
		vdesc = vchan_next_desc(&jzchan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		jzchan->desc = to_jz4780_dma_desc(vdesc);
		jzchan->curr_hwdesc = 0;

		if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) {
			/*
			 * The DMA controller doesn't support triggering an
			 * interrupt after processing each descriptor, only
			 * after processing an entire terminated list of
			 * descriptors. For a cyclic DMA setup the list of
			 * descriptors is not terminated so we can never get an
			 * interrupt.
			 *
			 * If the user requested a callback for a cyclic DMA
			 * setup then we work around this hardware limitation
			 * here by degrading to a set of unlinked descriptors
			 * which we will submit in sequence in response to the
			 * completion of processing the previous descriptor.
			 */
			for (i = 0; i < jzchan->desc->count; i++)
				jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK;
		}
	} else {
		/*
		 * There is an existing transfer, therefore this must be one
		 * for which we unlinked the descriptors above. Advance to the
		 * next one in the list.
		 */
		jzchan->curr_hwdesc =
			(jzchan->curr_hwdesc + 1) % jzchan->desc->count;
	}

	/* Use 8-word descriptors. */
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), JZ_DMA_DCS_DES8);

	/* Write descriptor address and initiate descriptor fetch. */
	desc_phys = jzchan->desc->desc_phys +
		    (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DDA(jzchan->id), desc_phys);
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));

	/* Enable the channel. */
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id),
			  JZ_DMA_DCS_DES8 | JZ_DMA_DCS_CTE);
}
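/*
 * jz4780_dma_begin() sets JZ_DMA_DCS_DES8 to select 8-word (32-byte)
 * descriptors, matching the layout of struct jz4780_dma_hwdesc including its
 * two reserved words.
 */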
static void jz4780_dma_issue_pending(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)
		jz4780_dma_begin(jzchan);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
}
static int jz4780_dma_terminate_all(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	/* Clear the DMA status and stop the transfer. */
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
	if (jzchan->desc) {
		jz4780_dma_desc_free(&jzchan->desc->vdesc);
		jzchan->desc = NULL;
	}

	vchan_get_all_descriptors(&jzchan->vchan, &head);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);

	vchan_dma_desc_free_list(&jzchan->vchan, &head);
	return 0;
}
static int jz4780_dma_slave_config(struct dma_chan *chan,
	struct dma_slave_config *config)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
	   || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
		return -EINVAL;

	/* Save the slave configuration; it is used later when preparing transfers. */
	memcpy(&jzchan->config, config, sizeof(jzchan->config));

	return 0;
}
static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_desc *desc, unsigned int next_sg)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned int residue, count;
	unsigned int i;

	residue = 0;

	for (i = next_sg; i < desc->count; i++)
		residue += desc->desc[i].dtc << jzchan->transfer_shift;

	if (next_sg != 0) {
		count = jz4780_dma_readl(jzdma,
					 JZ_DMA_REG_DTC(jzchan->id));
		residue += count << jzchan->transfer_shift;
	}

	return residue;
}
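/*
 * Residue is reported in bytes: each DTC count is a number of transfer
 * units, so with 16-byte units (transfer_shift == 4) a remaining count of
 * 100 contributes 100 << 4 = 1600 bytes.
 */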
static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if ((status == DMA_COMPLETE) || (txstate == NULL))
		return status;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	vdesc = vchan_find_desc(&jzchan->vchan, cookie);
	if (vdesc) {
		/* On the issued list, so hasn't been processed yet */
		txstate->residue = jz4780_dma_desc_residue(jzchan,
					to_jz4780_dma_desc(vdesc), 0);
	} else if (jzchan->desc && cookie == jzchan->desc->vdesc.tx.cookie) {
		txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
			  (jzchan->curr_hwdesc + 1) % jzchan->desc->count);
	} else {
		txstate->residue = 0;
	}

	if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
	    && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
		status = DMA_ERROR;

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
	return status;
}
static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
	struct jz4780_dma_chan *jzchan)
{
	uint32_t dcs;

	spin_lock(&jzchan->vchan.lock);

	dcs = jz4780_dma_readl(jzdma, JZ_DMA_REG_DCS(jzchan->id));
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);

	if (dcs & JZ_DMA_DCS_AR) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "address error (DCS=0x%x)\n", dcs);
	}

	if (dcs & JZ_DMA_DCS_HLT) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "channel halt (DCS=0x%x)\n", dcs);
	}

	if (jzchan->desc) {
		jzchan->desc->status = dcs;

		if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
			if (jzchan->desc->type == DMA_CYCLIC) {
				vchan_cyclic_callback(&jzchan->desc->vdesc);
			} else {
				vchan_cookie_complete(&jzchan->desc->vdesc);
				jzchan->desc = NULL;
			}

			jz4780_dma_begin(jzchan);
		}
	} else {
		dev_err(&jzchan->vchan.chan.dev->device,
			"channel IRQ with no active transfer\n");
	}

	spin_unlock(&jzchan->vchan.lock);
}
static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
	struct jz4780_dma_dev *jzdma = data;
	uint32_t pending, dmac;
	int i;

	pending = jz4780_dma_readl(jzdma, JZ_DMA_REG_DIRQP);

	for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
		if (!(pending & (1 << i)))
			continue;

		jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
	}

	/* Clear halt and address error status of all channels. */
	dmac = jz4780_dma_readl(jzdma, JZ_DMA_REG_DMAC);
	dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC, dmac);

	/* Clear interrupt pending status. */
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
	return IRQ_HANDLED;
}
static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device),
					    chan->device->dev,
					    JZ_DMA_DESC_BLOCK_SIZE,
					    PAGE_SIZE, 0);
	if (!jzchan->desc_pool) {
		dev_err(&chan->dev->device,
			"failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	return 0;
}
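/*
 * Pool blocks are PAGE_SIZE bytes and PAGE_SIZE aligned, so all hwdescs of a
 * transaction share one physically contiguous block. The chaining scheme
 * relies on this, since the 8-bit DTC offset field can only reach other
 * descriptors relative to the descriptor base address.
 */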
static void jz4780_dma_free_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	vchan_free_chan_resources(&jzchan->vchan);
	dma_pool_destroy(jzchan->desc_pool);
	jzchan->desc_pool = NULL;
}
static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct jz4780_dma_data *data = param;

	if (data->channel > -1) {
		if (data->channel != jzchan->id)
			return false;
	} else if (jzdma->chan_reserved & BIT(jzchan->id)) {
		return false;
	}

	jzchan->transfer_type = data->transfer_type;

	return true;
}
static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
	struct of_dma *ofdma)
{
	struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
	dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
	struct jz4780_dma_data data;

	if (dma_spec->args_count != 2)
		return NULL;

	data.transfer_type = dma_spec->args[0];
	data.channel = dma_spec->args[1];

	if (data.channel > -1) {
		if (data.channel >= JZ_DMA_NR_CHANNELS) {
			dev_err(jzdma->dma_device.dev,
				"device requested non-existent channel %d\n",
				data.channel);
			return NULL;
		}

		/* Can only select a channel marked as reserved. */
		if (!(jzdma->chan_reserved & BIT(data.channel))) {
			dev_err(jzdma->dma_device.dev,
				"device requested unreserved channel %d\n",
				data.channel);
			return NULL;
		}
	}

	return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
}
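/*
 * Illustrative client binding (not part of this file): consumers pass two
 * cells, the request type and the channel, where 0xffffffff (-1 as an int)
 * means "any unreserved channel". The request type values below are
 * placeholders; real values come from the SoC documentation.
 *
 *	client-device {
 *		dmas = <&dma 0x14 0xffffffff>, <&dma 0x15 0xffffffff>;
 *		dma-names = "tx", "rx";
 *	};
 */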
static int jz4780_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct jz4780_dma_dev *jzdma;
	struct jz4780_dma_chan *jzchan;
	struct dma_device *dd;
	struct resource *res;
	int i, ret;

	jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL);
	if (!jzdma)
		return -ENOMEM;

	platform_set_drvdata(pdev, jzdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "failed to get I/O memory\n");
		return -EINVAL;
	}

	jzdma->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(jzdma->base))
		return PTR_ERR(jzdma->base);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", ret);
		return ret;
	}

	jzdma->irq = ret;

	ret = devm_request_irq(dev, jzdma->irq, jz4780_dma_irq_handler, 0,
			       dev_name(dev), jzdma);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
		return ret;
	}

	jzdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(jzdma->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(jzdma->clk);
	}

	ret = clk_prepare_enable(jzdma->clk);
	if (ret) {
		dev_err(dev, "failed to enable clock\n");
		return ret;
	}

	/* Property is optional, if it doesn't exist the value will remain 0. */
	of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels",
				   0, &jzdma->chan_reserved);
	dd = &jzdma->dma_device;

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);

	dd->dev = dev;
	dd->copy_align = 2; /* 2^2 = 4 byte alignment */
	dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
	dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
	dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
	dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
	dd->device_config = jz4780_dma_slave_config;
	dd->device_terminate_all = jz4780_dma_terminate_all;
	dd->device_tx_status = jz4780_dma_tx_status;
	dd->device_issue_pending = jz4780_dma_issue_pending;
	dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	/*
	 * Enable DMA controller, mark all channels as not programmable.
	 * Also set the FMSC bit - it increases MSC performance, so it makes
	 * little sense not to enable it.
	 */
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC,
			  JZ_DMA_DMAC_DMAE | JZ_DMA_DMAC_FMSC);
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DMACP, 0);

	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
		jzchan = &jzdma->chan[i];
		jzchan->id = i;

		vchan_init(&jzchan->vchan, dd);
		jzchan->vchan.desc_free = jz4780_dma_desc_free;
	}

	ret = dma_async_device_register(dd);
	if (ret) {
		dev_err(dev, "failed to register device\n");
		goto err_disable_clk;
	}

	/* Register with OF DMA helpers. */
	ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate,
					 jzdma);
	if (ret) {
		dev_err(dev, "failed to register OF DMA controller\n");
		goto err_unregister_dev;
	}

	dev_info(dev, "JZ4780 DMA controller initialised\n");
	return 0;

err_unregister_dev:
	dma_async_device_unregister(dd);

err_disable_clk:
	clk_disable_unprepare(jzdma->clk);
	return ret;
}
static int jz4780_dma_remove(struct platform_device *pdev)
{
	struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	devm_free_irq(&pdev->dev, jzdma->irq, jzdma);
	dma_async_device_unregister(&jzdma->dma_device);
	return 0;
}
static const struct of_device_id jz4780_dma_dt_match[] = {
	{ .compatible = "ingenic,jz4780-dma", .data = NULL },
	{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);

static struct platform_driver jz4780_dma_driver = {
	.probe		= jz4780_dma_probe,
	.remove		= jz4780_dma_remove,
	.driver	= {
		.name	= "jz4780-dma",
		.of_match_table = of_match_ptr(jz4780_dma_dt_match),
	},
};

static int __init jz4780_dma_init(void)
{
	return platform_driver_register(&jz4780_dma_driver);
}
subsys_initcall(jz4780_dma_init);

static void __exit jz4780_dma_exit(void)
{
	platform_driver_unregister(&jz4780_dma_driver);
}
module_exit(jz4780_dma_exit);

MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver");
MODULE_LICENSE("GPL");