mpc512x_dma.c

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * MPC512x and MPC8308 DMA driver. It supports memory to memory data
 * transfers (tested using the dmatest module) and data transfers between
 * memory and peripheral I/O memory by means of slave scatter/gather with
 * these limitations:
 *  - chunked transfers (described by s/g lists with more than one item)
 *    are refused as long as proper support for scatter/gather is missing;
 *  - transfers on MPC8308 always start from software as this SoC appears
 *    not to have external request lines for peripheral flow control;
 *  - only peripheral devices with 4-byte FIFO access register are supported;
 *  - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently
 *    source and destination addresses must be 4-byte aligned
 *    and transfer size must be aligned on (4 * maxburst) boundary;
 */
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/random.h>

#include "dmaengine.h"
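
/*
 * Illustrative sketch, not part of the original driver: how a slave
 * client could drive this controller through the generic dmaengine API
 * within the constraints listed above. EXAMPLE_FIFO_ADDR and
 * example_start_rx() are hypothetical names used only for this example;
 * the block is guarded out so it is never built.
 */
#if 0
static int example_start_rx(struct dma_chan *chan, dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		/* must point at a 4-byte FIFO access register (hypothetical) */
		.src_addr = EXAMPLE_FIFO_ADDR,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 4,	/* len must be aligned on 4 * 4 = 16 */
	};
	struct dma_async_tx_descriptor *txd;
	struct scatterlist sg;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* Multi-entry s/g lists are refused, so use a single entry */
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	txd = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM, 0);
	if (!txd)
		return -EINVAL;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
#endif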

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_TCD_OFFSET	0x1000

/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)

#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];
				/* DMA channels(0~63) priority */
};

struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};
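
/*
 * For illustration of the minor/major loop split above:
 * mpc_dma_prep_memcpy() programs a single major iteration
 * (citer = biter = 1) whose minor loop moves the whole buffer, e.g. a
 * 32-byte aligned 1024-byte copy becomes nbytes = 1024 with 32-byte
 * bursts (ssize = dsize = MPC_DMA_TSIZE_32), while
 * mpc_dma_prep_slave_sg() keeps nbytes small and runs many major
 * iterations instead.
 */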

struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
	int				will_access_peripheral;
};

struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;

	/* Settings for access to peripheral FIFO */
	dma_addr_t			src_per_paddr;
	u32				src_tcd_nunits;
	dma_addr_t			dst_per_paddr;
	u32				dst_tcd_nunits;

	/* Lock for this structure */
	spinlock_t			lock;
};

struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	int				irq2;
	uint				error_status;
	int				is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};

#define DRV_NAME	"mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}
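
/*
 * Note: the conversion above relies on each mpc_dma_chan being embedded
 * in the controller's channels[] array at index chan_id, so
 * container_of() on the channel's own array slot recovers the parent
 * struct mpc_dma.
 */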

/*
 * Execute all queued DMA descriptors.
 *
 * Following requirements must be met while calling mpc_dma_execute():
 *  a) mchan->lock is acquired,
 *  b) mchan->active list is empty,
 *  c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	while (!list_empty(&mchan->queued)) {
		mdesc = list_first_entry(&mchan->queued,
						struct mpc_dma_desc, node);
		/*
		 * Grab either several mem-to-mem transfer descriptors
		 * or one peripheral transfer descriptor,
		 * don't mix mem-to-mem and peripheral transfer descriptors
		 * within the same 'active' list.
		 */
		if (mdesc->will_access_peripheral) {
			if (list_empty(&mchan->active))
				list_move_tail(&mdesc->node, &mchan->active);
			break;
		} else {
			list_move_tail(&mdesc->node, &mchan->active);
		}
	}

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;

	if (mdma->is_mpc8308) {
		/* MPC8308, no request lines, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	} else if (first->will_access_peripheral) {
		/* Peripherals involved, start by external request signal */
		out_8(&mdma->regs->dmaserq, cid);
	} else {
		/* Memory to memory transfer, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	}
}

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}
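
/*
 * For illustration: with status = 0x5 the loop above serves channel 2
 * first (fls(0x5) - 1 == 2), clears that bit, then serves channel 0,
 * i.e. pending channels are handled from highest to lowest.
 */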

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}

/* process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported following error(s) on channel %u:\n",
						MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);
	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n",
				i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptor to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
									node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	mdesc->will_access_peripheral = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));
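
	/*
	 * OR-ing src, dst and len below checks the alignment of all three
	 * values at once, since OR preserves every low-order one bit:
	 * e.g. src = 0x1000, dst = 0x2010, len = 0x100 gives 0x3110, which
	 * fails the 32-byte test but passes the 16-byte one.
	 */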
	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}

static struct dma_async_tx_descriptor *
mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	dma_addr_t per_paddr;
	u32 tcd_nunits;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;
	struct scatterlist *sg;
	size_t len;
	int iter, i;

	/* Currently there is no proper support for scatter/gather */
	if (sg_len != 1)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		spin_lock_irqsave(&mchan->lock, iflags);

		/*
		 * list_first_entry() never returns NULL, so use the
		 * _or_null variant to detect an empty free list.
		 */
		mdesc = list_first_entry_or_null(&mchan->free,
						struct mpc_dma_desc, node);
		if (!mdesc) {
			spin_unlock_irqrestore(&mchan->lock, iflags);
			/* Try to free completed descriptors */
			mpc_dma_process_completed(mdma);
			return NULL;
		}

		list_del(&mdesc->node);

		if (direction == DMA_DEV_TO_MEM) {
			per_paddr = mchan->src_per_paddr;
			tcd_nunits = mchan->src_tcd_nunits;
		} else {
			per_paddr = mchan->dst_per_paddr;
			tcd_nunits = mchan->dst_tcd_nunits;
		}

		spin_unlock_irqrestore(&mchan->lock, iflags);

		if (per_paddr == 0 || tcd_nunits == 0)
			goto err_prep;

		mdesc->error = 0;
		mdesc->will_access_peripheral = 1;

		/* Prepare Transfer Control Descriptor for this transaction */
		tcd = mdesc->tcd;

		memset(tcd, 0, sizeof(struct mpc_dma_tcd));

		if (!IS_ALIGNED(sg_dma_address(sg), 4))
			goto err_prep;

		if (direction == DMA_DEV_TO_MEM) {
			tcd->saddr = per_paddr;
			tcd->daddr = sg_dma_address(sg);
			tcd->soff = 0;
			tcd->doff = 4;
		} else {
			tcd->saddr = sg_dma_address(sg);
			tcd->daddr = per_paddr;
			tcd->soff = 4;
			tcd->doff = 0;
		}

		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;

		len = sg_dma_len(sg);
		tcd->nbytes = tcd_nunits * 4;
		if (!IS_ALIGNED(len, tcd->nbytes))
			goto err_prep;

		iter = len / tcd->nbytes;
		if (iter >= 1 << 15) {
			/* len is too big */
			goto err_prep;
		}
		/* citer_linkch contains the high bits of iter */
		tcd->biter = iter & 0x1ff;
		tcd->biter_linkch = iter >> 9;
		tcd->citer = tcd->biter;
		tcd->citer_linkch = tcd->biter_linkch;
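
		/*
		 * Example of the split above: len = 64 KiB with nbytes = 16
		 * gives iter = 4096 = 0x1000, stored as biter = 0
		 * (0x1000 & 0x1ff) and biter_linkch = 8 (0x1000 >> 9).
		 */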
		tcd->e_sg = 0;
		tcd->d_req = 1;

		/* Place descriptor in prepared list */
		spin_lock_irqsave(&mchan->lock, iflags);
		list_add_tail(&mdesc->node, &mchan->prepared);
		spin_unlock_irqrestore(&mchan->lock, iflags);
	}

	return &mdesc->desc;

err_prep:
	/* Put the descriptor back */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return NULL;
}

static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
							unsigned long arg)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma *mdma;
	struct dma_slave_config *cfg;
	unsigned long flags;

	mchan = dma_chan_to_mpc_dma_chan(chan);
	switch (cmd) {
	case DMA_TERMINATE_ALL:
		/* Disable channel requests */
		mdma = dma_chan_to_mpc_dma(chan);

		spin_lock_irqsave(&mchan->lock, flags);

		out_8(&mdma->regs->dmacerq, chan->chan_id);
		list_splice_tail_init(&mchan->prepared, &mchan->free);
		list_splice_tail_init(&mchan->queued, &mchan->free);
		list_splice_tail_init(&mchan->active, &mchan->free);

		spin_unlock_irqrestore(&mchan->lock, flags);

		return 0;

	case DMA_SLAVE_CONFIG:
		/*
		 * Software constraints:
		 *  - only transfers between a peripheral device and
		 *    memory are supported;
		 *  - only peripheral devices with 4-byte FIFO access register
		 *    are supported;
		 *  - minimal transfer chunk is 4 bytes and consequently
		 *    source and destination addresses must be 4-byte aligned
		 *    and transfer size must be aligned on (4 * maxburst)
		 *    boundary;
		 *  - during the transfer RAM address is being incremented by
		 *    the size of minimal transfer chunk;
		 *  - peripheral port's address is constant during the transfer.
		 */
		cfg = (void *)arg;

		if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
		    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
		    !IS_ALIGNED(cfg->src_addr, 4) ||
		    !IS_ALIGNED(cfg->dst_addr, 4)) {
			return -EINVAL;
		}

		spin_lock_irqsave(&mchan->lock, flags);

		mchan->src_per_paddr = cfg->src_addr;
		mchan->src_tcd_nunits = cfg->src_maxburst;
		mchan->dst_per_paddr = cfg->dst_addr;
		mchan->dst_tcd_nunits = cfg->dst_maxburst;

		/* Apply defaults */
		if (mchan->src_tcd_nunits == 0)
			mchan->src_tcd_nunits = 1;
		if (mchan->dst_tcd_nunits == 0)
			mchan->dst_tcd_nunits = 1;

		spin_unlock_irqrestore(&mchan->lock, flags);

		return 0;

	default:
		/* Unknown command */
		break;
	}

	return -ENXIO;
}

static int mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		dev_err(dev, "Memory exhausted!\n");
		retval = -ENOMEM;
		goto err;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (mdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		retval = -EINVAL;
		goto err;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (mdma->irq2 == NO_IRQ) {
			dev_err(dev, "Error mapping IRQ!\n");
			retval = -EINVAL;
			goto err_dispose1;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		goto err_dispose2;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		retval = -EBUSY;
		goto err_dispose2;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		retval = -ENOMEM;
		goto err_dispose2;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		retval = -EINVAL;
		goto err_dispose2;
	}

	if (mdma->is_mpc8308) {
		retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
							DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			retval = -EINVAL;
			goto err_free1;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	if (mdma->is_mpc8308)
		dma->chancnt = MPC8308_DMACHAN_MAX;
	else
		dma->chancnt = MPC512x_DMACHAN_MAX;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
	dma->device_control = mpc_dma_device_control;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (mdma->is_mpc8308) {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	} else {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
					MPC_DMA_DMACR_ERGA |
					MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval)
		goto err_free2;

	/* Register with OF helpers for DMA lookups (nonfatal) */
	if (dev->of_node) {
		retval = of_dma_controller_register(dev->of_node,
						of_dma_xlate_by_chan_id, mdma);
		if (retval)
			dev_warn(dev, "Could not register for OF lookup\n");
	}

	return 0;

err_free2:
	if (mdma->is_mpc8308)
		free_irq(mdma->irq2, mdma);
err_free1:
	free_irq(mdma->irq, mdma);
err_dispose2:
	if (mdma->is_mpc8308)
		irq_dispose_mapping(mdma->irq2);
err_dispose1:
	irq_dispose_mapping(mdma->irq);
err:
	return retval;
}

static int mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&mdma->dma);
	if (mdma->is_mpc8308) {
		free_irq(mdma->irq2, mdma);
		irq_dispose_mapping(mdma->irq2);
	}
	free_irq(mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);

	return 0;
}

static const struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{ .compatible = "fsl,mpc8308-dma", },
	{},
};

static struct platform_driver mpc_dma_driver = {
	.probe		= mpc_dma_probe,
	.remove		= mpc_dma_remove,
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
		.of_match_table	= mpc_dma_match,
	},
};

module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");