s3c24xx-dma.c

  1. /*
  2. * S3C24XX DMA handling
  3. *
  4. * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
  5. *
  6. * based on amba-pl08x.c
  7. *
  8. * Copyright (c) 2006 ARM Ltd.
  9. * Copyright (c) 2010 ST-Ericsson SA
  10. *
  11. * Author: Peter Pearse <peter.pearse@arm.com>
  12. * Author: Linus Walleij <linus.walleij@stericsson.com>
  13. *
  14. * This program is free software; you can redistribute it and/or modify it
  15. * under the terms of the GNU General Public License as published by the Free
  16. * Software Foundation; either version 2 of the License, or (at your option)
  17. * any later version.
  18. *
  19. * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals
  20. * that can be routed to any of the 4 to 8 hardware-channels.
  21. *
  22. * Therefore on these DMA controllers the number of channels
  23. * and the number of incoming DMA signals are two totally different things.
  24. * It is usually not possible to handle all physical signals at the same time,
  25. * so a multiplexing scheme with possible denial of use is necessary.
  26. *
  27. * Open items:
  28. * - bursts
  29. */
  30. #include <linux/platform_device.h>
  31. #include <linux/types.h>
  32. #include <linux/dmaengine.h>
  33. #include <linux/dma-mapping.h>
  34. #include <linux/interrupt.h>
  35. #include <linux/clk.h>
  36. #include <linux/module.h>
  37. #include <linux/slab.h>
  38. #include <linux/platform_data/dma-s3c24xx.h>
  39. #include "dmaengine.h"
  40. #include "virt-dma.h"
  41. #define MAX_DMA_CHANNELS 8
  42. #define S3C24XX_DISRC 0x00
  43. #define S3C24XX_DISRCC 0x04
  44. #define S3C24XX_DISRCC_INC_INCREMENT 0
  45. #define S3C24XX_DISRCC_INC_FIXED BIT(0)
  46. #define S3C24XX_DISRCC_LOC_AHB 0
  47. #define S3C24XX_DISRCC_LOC_APB BIT(1)
  48. #define S3C24XX_DIDST 0x08
  49. #define S3C24XX_DIDSTC 0x0c
  50. #define S3C24XX_DIDSTC_INC_INCREMENT 0
  51. #define S3C24XX_DIDSTC_INC_FIXED BIT(0)
  52. #define S3C24XX_DIDSTC_LOC_AHB 0
  53. #define S3C24XX_DIDSTC_LOC_APB BIT(1)
  54. #define S3C24XX_DIDSTC_INT_TC0 0
  55. #define S3C24XX_DIDSTC_INT_RELOAD BIT(2)
  56. #define S3C24XX_DCON 0x10
  57. #define S3C24XX_DCON_TC_MASK 0xfffff
  58. #define S3C24XX_DCON_DSZ_BYTE (0 << 20)
  59. #define S3C24XX_DCON_DSZ_HALFWORD (1 << 20)
  60. #define S3C24XX_DCON_DSZ_WORD (2 << 20)
  61. #define S3C24XX_DCON_DSZ_MASK (3 << 20)
  62. #define S3C24XX_DCON_DSZ_SHIFT 20
  63. #define S3C24XX_DCON_AUTORELOAD 0
  64. #define S3C24XX_DCON_NORELOAD BIT(22)
  65. #define S3C24XX_DCON_HWTRIG BIT(23)
  66. #define S3C24XX_DCON_HWSRC_SHIFT 24
  67. #define S3C24XX_DCON_SERV_SINGLE 0
  68. #define S3C24XX_DCON_SERV_WHOLE BIT(27)
  69. #define S3C24XX_DCON_TSZ_UNIT 0
  70. #define S3C24XX_DCON_TSZ_BURST4 BIT(28)
  71. #define S3C24XX_DCON_INT BIT(29)
  72. #define S3C24XX_DCON_SYNC_PCLK 0
  73. #define S3C24XX_DCON_SYNC_HCLK BIT(30)
  74. #define S3C24XX_DCON_DEMAND 0
  75. #define S3C24XX_DCON_HANDSHAKE BIT(31)
  76. #define S3C24XX_DSTAT 0x14
  77. #define S3C24XX_DSTAT_STAT_BUSY BIT(20)
  78. #define S3C24XX_DSTAT_CURRTC_MASK 0xfffff
  79. #define S3C24XX_DMASKTRIG 0x20
  80. #define S3C24XX_DMASKTRIG_SWTRIG BIT(0)
  81. #define S3C24XX_DMASKTRIG_ON BIT(1)
  82. #define S3C24XX_DMASKTRIG_STOP BIT(2)
  83. #define S3C24XX_DMAREQSEL 0x24
  84. #define S3C24XX_DMAREQSEL_HW BIT(0)
  85. /*
  86. * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel
  87. * for a DMA source. Instead only specific channels are valid.
  88. * All of these SoCs have 4 physical channels and the number of request
  89. * source bits is 3. Additionally we also need 1 bit to mark the channel
  90. * as valid.
  91. * Therefore we separate the chansel element of the channel data into 4
  92. * parts of 4 bits each, to hold the information if the channel is valid
  93. * and the hw request source to use.
  94. *
  95. * Example:
  96. * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
  97. * For it the chansel field would look like
  98. *
  99. * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1
  100. * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2
  101. * ((BIT(3) | 2) << 0 * 4) // channel 0, with request source 2
  102. */
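/*
 * For illustration only: a hypothetical board file could describe the SDI
 * example above in its channel table roughly like the sketch below. The
 * field names come from struct s3c24xx_dma_channel used by this driver;
 * the DMACH_SDI index, the bus and the handshake value are made up for
 * this sketch and are not taken from any real machine file.
 *
 *	[DMACH_SDI] = {
 *		.bus	   = S3C24XX_DMA_APB,
 *		.handshake = true,
 *		.chansel   = ((BIT(3) | 1) << 3 * 4) |
 *			     ((BIT(3) | 2) << 2 * 4) |
 *			     ((BIT(3) | 2) << 0 * 4),
 *	},
 */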
  103. #define S3C24XX_CHANSEL_WIDTH 4
  104. #define S3C24XX_CHANSEL_VALID BIT(3)
  105. #define S3C24XX_CHANSEL_REQ_MASK 7
  106. /*
  107. * struct soc_data - vendor-specific config parameters for individual SoCs
  108. * @stride: spacing between the registers of each channel
  109. * @has_reqsel: does the controller use the newer request selection mechanism
  110. * @has_clocks: are controllable dma-clocks present
  111. */
  112. struct soc_data {
  113. int stride;
  114. bool has_reqsel;
  115. bool has_clocks;
  116. };
  117. /*
  118. * enum s3c24xx_dma_chan_state - holds the virtual channel states
  119. * @S3C24XX_DMA_CHAN_IDLE: the channel is idle
  120. * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport
  121. * channel and is running a transfer on it
  122. * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport
  123. * channel to become available (only pertains to memcpy channels)
  124. */
  125. enum s3c24xx_dma_chan_state {
  126. S3C24XX_DMA_CHAN_IDLE,
  127. S3C24XX_DMA_CHAN_RUNNING,
  128. S3C24XX_DMA_CHAN_WAITING,
  129. };
  130. /*
  131. * struct s3c24xx_sg - structure containing data per sg
  132. * @src_addr: src address of sg
  133. * @dst_addr: dst address of sg
  134. * @len: transfer len in bytes
  135. * @node: node for txd's dsg_list
  136. */
  137. struct s3c24xx_sg {
  138. dma_addr_t src_addr;
  139. dma_addr_t dst_addr;
  140. size_t len;
  141. struct list_head node;
  142. };
  143. /*
  144. * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
  145. * @vd: virtual DMA descriptor
  146. * @dsg_list: list of children sg's
  147. * @at: sg currently being transferred
  148. * @width: transfer width
  149. * @disrcc: value for source control register
  150. * @didstc: value for destination control register
  151. * @dcon: base value for dcon register
  152. */
  153. struct s3c24xx_txd {
  154. struct virt_dma_desc vd;
  155. struct list_head dsg_list;
  156. struct list_head *at;
  157. u8 width;
  158. u32 disrcc;
  159. u32 didstc;
  160. u32 dcon;
  161. };
  162. struct s3c24xx_dma_chan;
  163. /*
  164. * struct s3c24xx_dma_phy - holder for the physical channels
  165. * @id: physical index to this channel
  166. * @valid: does the channel have all required elements
  167. * @base: virtual memory base (remapped) for this channel
  168. * @irq: interrupt for this channel
  169. * @clk: clock for this channel
  170. * @lock: a lock to use when altering an instance of this struct
  171. * @serving: virtual channel currently being served by this physical channel
  172. * @host: a pointer to the host (internal use)
  173. */
  174. struct s3c24xx_dma_phy {
  175. unsigned int id;
  176. bool valid;
  177. void __iomem *base;
  178. int irq;
  179. struct clk *clk;
  180. spinlock_t lock;
  181. struct s3c24xx_dma_chan *serving;
  182. struct s3c24xx_dma_engine *host;
  183. };
  184. /*
  185. * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
  186. * @id: the id of the channel
  187. * @name: name of the channel
  188. * @vc: wrapped virtual channel
  189. * @phy: the physical channel utilized by this channel, if there is one
  190. * @cfg: slave configuration set by the client at runtime
  191. * @at: active transaction on this channel
  192. * @lock: a lock for this channel data
  193. * @host: a pointer to the host (internal use)
  194. * @state: whether the channel is idle, running etc
  195. * @slave: whether this channel is a device (slave) or for memcpy
  196. */
  197. struct s3c24xx_dma_chan {
  198. int id;
  199. const char *name;
  200. struct virt_dma_chan vc;
  201. struct s3c24xx_dma_phy *phy;
  202. struct dma_slave_config cfg;
  203. struct s3c24xx_txd *at;
  204. struct s3c24xx_dma_engine *host;
  205. enum s3c24xx_dma_chan_state state;
  206. bool slave;
  207. };
  208. /*
  209. * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
  210. * @pdev: the corresponding platform device
  211. * @pdata: platform data passed in from the platform/machine
  212. * @base: virtual memory base (remapped)
  213. * @slave: slave engine for this instance
  214. * @memcpy: memcpy engine for this instance
  215. * @phy_chans: array of data for the physical channels
  216. */
  217. struct s3c24xx_dma_engine {
  218. struct platform_device *pdev;
  219. const struct s3c24xx_dma_platdata *pdata;
  220. struct soc_data *sdata;
  221. void __iomem *base;
  222. struct dma_device slave;
  223. struct dma_device memcpy;
  224. struct s3c24xx_dma_phy *phy_chans;
  225. };
  226. /*
  227. * Physical channel handling
  228. */
  229. /*
  230. * Check whether a certain channel is busy or not.
  231. */
  232. static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy)
  233. {
  234. unsigned int val = readl(phy->base + S3C24XX_DSTAT);
  235. return val & S3C24XX_DSTAT_STAT_BUSY;
  236. }
  237. static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan,
  238. struct s3c24xx_dma_phy *phy)
  239. {
  240. struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
  241. const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
  242. struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
  243. int phyvalid;
  244. /* every phy is valid for memcpy channels */
  245. if (!s3cchan->slave)
  246. return true;
  247. /* On newer variants all phys can be used for all virtual channels */
  248. if (s3cdma->sdata->has_reqsel)
  249. return true;
  250. phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH));
  251. return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false;
  252. }
  253. /*
  254. * Allocate a physical channel for a virtual channel
  255. *
  256. * Try to locate a physical channel to be used for this transfer. If all
  257. * are taken return NULL and the requester will have to cope by using
  258. * some fallback PIO mode or retrying later.
  259. */
  260. static
  261. struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
  262. {
  263. struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
  264. const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
  265. struct s3c24xx_dma_channel *cdata;
  266. struct s3c24xx_dma_phy *phy = NULL;
  267. unsigned long flags;
  268. int i;
  269. int ret;
  270. if (s3cchan->slave)
  271. cdata = &pdata->channels[s3cchan->id];
  272. for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
  273. phy = &s3cdma->phy_chans[i];
  274. if (!phy->valid)
  275. continue;
  276. if (!s3c24xx_dma_phy_valid(s3cchan, phy))
  277. continue;
  278. spin_lock_irqsave(&phy->lock, flags);
  279. if (!phy->serving) {
  280. phy->serving = s3cchan;
  281. spin_unlock_irqrestore(&phy->lock, flags);
  282. break;
  283. }
  284. spin_unlock_irqrestore(&phy->lock, flags);
  285. }
  286. /* No physical channel available, cope with it */
  287. if (i == s3cdma->pdata->num_phy_channels) {
  288. dev_warn(&s3cdma->pdev->dev, "no phy channel available\n");
  289. return NULL;
  290. }
  291. /* start the phy clock */
  292. if (s3cdma->sdata->has_clocks) {
  293. ret = clk_enable(phy->clk);
  294. if (ret) {
  295. dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n",
  296. phy->id, ret);
  297. phy->serving = NULL;
  298. return NULL;
  299. }
  300. }
  301. return phy;
  302. }
  303. /*
  304. * Mark the physical channel as free.
  305. *
  306. * This drops the link between the physical and virtual channel.
  307. */
  308. static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy)
  309. {
  310. struct s3c24xx_dma_engine *s3cdma = phy->host;
  311. if (s3cdma->sdata->has_clocks)
  312. clk_disable(phy->clk);
  313. phy->serving = NULL;
  314. }
  315. /*
  316. * Stops the channel by writing the stop bit.
  317. * This should not be used for an on-going transfer, but as a method of
  318. * shutting down a channel (eg, when it's no longer used) or terminating a
  319. * transfer.
  320. */
  321. static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy)
  322. {
  323. writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG);
  324. }
  325. /*
  326. * Virtual channel handling
  327. */
  328. static inline
  329. struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan)
  330. {
  331. return container_of(chan, struct s3c24xx_dma_chan, vc.chan);
  332. }
  333. static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
  334. {
  335. struct s3c24xx_dma_phy *phy = s3cchan->phy;
  336. struct s3c24xx_txd *txd = s3cchan->at;
  337. u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;
  338. return tc * txd->width;
  339. }
  340. static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan,
  341. struct dma_slave_config *config)
  342. {
  343. if (!s3cchan->slave)
  344. return -EINVAL;
  345. /* Reject definitely invalid configurations */
  346. if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
  347. config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
  348. return -EINVAL;
  349. s3cchan->cfg = *config;
  350. return 0;
  351. }
  352. /*
  353. * Transfer handling
  354. */
  355. static inline
  356. struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx)
  357. {
  358. return container_of(tx, struct s3c24xx_txd, vd.tx);
  359. }
  360. static struct s3c24xx_txd *s3c24xx_dma_get_txd(void)
  361. {
  362. struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
  363. if (txd) {
  364. INIT_LIST_HEAD(&txd->dsg_list);
  365. txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD;
  366. }
  367. return txd;
  368. }
  369. static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd)
  370. {
  371. struct s3c24xx_sg *dsg, *_dsg;
  372. list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
  373. list_del(&dsg->node);
  374. kfree(dsg);
  375. }
  376. kfree(txd);
  377. }
  378. static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan,
  379. struct s3c24xx_txd *txd)
  380. {
  381. struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
  382. struct s3c24xx_dma_phy *phy = s3cchan->phy;
  383. const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
  384. struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node);
  385. u32 dcon = txd->dcon;
  386. u32 val;
  387. /* transfer-size and -count from len and width */
  388. switch (txd->width) {
  389. case 1:
  390. dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len;
  391. break;
  392. case 2:
  393. dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2);
  394. break;
  395. case 4:
  396. dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4);
  397. break;
  398. }
  399. if (s3cchan->slave) {
  400. struct s3c24xx_dma_channel *cdata =
  401. &pdata->channels[s3cchan->id];
  402. if (s3cdma->sdata->has_reqsel) {
  403. writel_relaxed((cdata->chansel << 1) |
  404. S3C24XX_DMAREQSEL_HW,
  405. phy->base + S3C24XX_DMAREQSEL);
  406. } else {
  407. int csel = cdata->chansel >> (phy->id *
  408. S3C24XX_CHANSEL_WIDTH);
  409. csel &= S3C24XX_CHANSEL_REQ_MASK;
  410. dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT;
  411. dcon |= S3C24XX_DCON_HWTRIG;
  412. }
  413. } else {
  414. if (s3cdma->sdata->has_reqsel)
  415. writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL);
  416. }
  417. writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC);
  418. writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC);
  419. writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST);
  420. writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC);
  421. writel_relaxed(dcon, phy->base + S3C24XX_DCON);
  422. val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG);
  423. val &= ~S3C24XX_DMASKTRIG_STOP;
  424. val |= S3C24XX_DMASKTRIG_ON;
  425. /* trigger the dma operation for memcpy transfers */
  426. if (!s3cchan->slave)
  427. val |= S3C24XX_DMASKTRIG_SWTRIG;
  428. writel(val, phy->base + S3C24XX_DMASKTRIG);
  429. }
  430. /*
  431. * Set the initial DMA register values and start first sg.
  432. */
  433. static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
  434. {
  435. struct s3c24xx_dma_phy *phy = s3cchan->phy;
  436. struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc);
  437. struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
  438. list_del(&txd->vd.node);
  439. s3cchan->at = txd;
  440. /* Wait for channel inactive */
  441. while (s3c24xx_dma_phy_busy(phy))
  442. cpu_relax();
  443. /* point to the first element of the sg list */
  444. txd->at = txd->dsg_list.next;
  445. s3c24xx_dma_start_next_sg(s3cchan, txd);
  446. }
  447. static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
  448. struct s3c24xx_dma_chan *s3cchan)
  449. {
  450. LIST_HEAD(head);
  451. vchan_get_all_descriptors(&s3cchan->vc, &head);
  452. vchan_dma_desc_free_list(&s3cchan->vc, &head);
  453. }
  454. /*
  455. * Try to allocate a physical channel. When successful, assign it to
  456. * this virtual channel, and initiate the next descriptor. The
  457. * virtual channel lock must be held at this point.
  458. */
  459. static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan)
  460. {
  461. struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
  462. struct s3c24xx_dma_phy *phy;
  463. phy = s3c24xx_dma_get_phy(s3cchan);
  464. if (!phy) {
  465. dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n",
  466. s3cchan->name);
  467. s3cchan->state = S3C24XX_DMA_CHAN_WAITING;
  468. return;
  469. }
  470. dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n",
  471. phy->id, s3cchan->name);
  472. s3cchan->phy = phy;
  473. s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
  474. s3c24xx_dma_start_next_txd(s3cchan);
  475. }
  476. static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy,
  477. struct s3c24xx_dma_chan *s3cchan)
  478. {
  479. struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
  480. dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n",
  481. phy->id, s3cchan->name);
  482. /*
  483. * We do this without taking the lock; we're really only concerned
  484. * about whether this pointer is NULL or not, and we're guaranteed
  485. * that this will only be called when it _already_ is non-NULL.
  486. */
  487. phy->serving = s3cchan;
  488. s3cchan->phy = phy;
  489. s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
  490. s3c24xx_dma_start_next_txd(s3cchan);
  491. }
  492. /*
  493. * Free a physical DMA channel, potentially reallocating it to another
  494. * virtual channel if we have any pending.
  495. */
  496. static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan)
  497. {
  498. struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
  499. struct s3c24xx_dma_chan *p, *next;
  500. retry:
  501. next = NULL;
  502. /* Find a waiting virtual channel for the next transfer. */
  503. list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
  504. if (p->state == S3C24XX_DMA_CHAN_WAITING) {
  505. next = p;
  506. break;
  507. }
  508. if (!next) {
  509. list_for_each_entry(p, &s3cdma->slave.channels,
  510. vc.chan.device_node)
  511. if (p->state == S3C24XX_DMA_CHAN_WAITING &&
  512. s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
  513. next = p;
  514. break;
  515. }
  516. }
  517. /* Ensure that the physical channel is stopped */
  518. s3c24xx_dma_terminate_phy(s3cchan->phy);
  519. if (next) {
  520. bool success;
  521. /*
  522. * Eww. We know this isn't going to deadlock
  523. * but lockdep probably doesn't.
  524. */
  525. spin_lock(&next->vc.lock);
  526. /* Re-check the state now that we have the lock */
  527. success = next->state == S3C24XX_DMA_CHAN_WAITING;
  528. if (success)
  529. s3c24xx_dma_phy_reassign_start(s3cchan->phy, next);
  530. spin_unlock(&next->vc.lock);
  531. /* If the state changed, try to find another channel */
  532. if (!success)
  533. goto retry;
  534. } else {
  535. /* No more jobs, so free up the physical channel */
  536. s3c24xx_dma_put_phy(s3cchan->phy);
  537. }
  538. s3cchan->phy = NULL;
  539. s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
  540. }
  541. static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
  542. {
  543. struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
  544. struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);
  545. if (!s3cchan->slave)
  546. dma_descriptor_unmap(&vd->tx);
  547. s3c24xx_dma_free_txd(txd);
  548. }
  549. static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
  550. {
  551. struct s3c24xx_dma_phy *phy = data;
  552. struct s3c24xx_dma_chan *s3cchan = phy->serving;
  553. struct s3c24xx_txd *txd;
  554. dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id);
  555. /*
  556. * Interrupts happen to notify the completion of a transfer and the
  557. * channel should have moved into its stop state already on its own.
  558. * Therefore interrupts on channels not bound to a virtual channel
  559. * should never happen. Nevertheless send a terminate command to the
  560. * channel if the unlikely case happens.
  561. */
  562. if (unlikely(!s3cchan)) {
  563. dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n",
  564. phy->id);
  565. s3c24xx_dma_terminate_phy(phy);
  566. return IRQ_HANDLED;
  567. }
  568. spin_lock(&s3cchan->vc.lock);
  569. txd = s3cchan->at;
  570. if (txd) {
  571. /* when more sg's are in this txd, start the next one */
  572. if (!list_is_last(txd->at, &txd->dsg_list)) {
  573. txd->at = txd->at->next;
  574. s3c24xx_dma_start_next_sg(s3cchan, txd);
  575. } else {
  576. s3cchan->at = NULL;
  577. vchan_cookie_complete(&txd->vd);
  578. /*
  579. * And start the next descriptor (if any),
  580. * otherwise free this channel.
  581. */
  582. if (vchan_next_desc(&s3cchan->vc))
  583. s3c24xx_dma_start_next_txd(s3cchan);
  584. else
  585. s3c24xx_dma_phy_free(s3cchan);
  586. }
  587. }
  588. spin_unlock(&s3cchan->vc.lock);
  589. return IRQ_HANDLED;
  590. }
  591. /*
  592. * The DMA ENGINE API
  593. */
  594. static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
  595. unsigned long arg)
  596. {
  597. struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
  598. struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
  599. unsigned long flags;
  600. int ret = 0;
  601. spin_lock_irqsave(&s3cchan->vc.lock, flags);
  602. switch (cmd) {
  603. case DMA_SLAVE_CONFIG:
  604. ret = s3c24xx_dma_set_runtime_config(s3cchan,
  605. (struct dma_slave_config *)arg);
  606. break;
  607. case DMA_TERMINATE_ALL:
  608. if (!s3cchan->phy && !s3cchan->at) {
  609. dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
  610. s3cchan->id);
  611. ret = -EINVAL;
  612. break;
  613. }
  614. s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
  615. /* Mark physical channel as free */
  616. if (s3cchan->phy)
  617. s3c24xx_dma_phy_free(s3cchan);
  618. /* Dequeue current job */
  619. if (s3cchan->at) {
  620. s3c24xx_dma_desc_free(&s3cchan->at->vd);
  621. s3cchan->at = NULL;
  622. }
  623. /* Dequeue jobs not yet fired as well */
  624. s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
  625. break;
  626. default:
  627. /* Unknown command */
  628. ret = -ENXIO;
  629. break;
  630. }
  631. spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
  632. return ret;
  633. }
  634. static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan)
  635. {
  636. return 0;
  637. }
  638. static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
  639. {
  640. /* Ensure all queued descriptors are freed */
  641. vchan_free_chan_resources(to_virt_chan(chan));
  642. }
  643. static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
  644. dma_cookie_t cookie, struct dma_tx_state *txstate)
  645. {
  646. struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
  647. struct s3c24xx_txd *txd;
  648. struct s3c24xx_sg *dsg;
  649. struct virt_dma_desc *vd;
  650. unsigned long flags;
  651. enum dma_status ret;
  652. size_t bytes = 0;
  653. spin_lock_irqsave(&s3cchan->vc.lock, flags);
  654. ret = dma_cookie_status(chan, cookie, txstate);
  655. if (ret == DMA_COMPLETE) {
  656. spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
  657. return ret;
  658. }
  659. /*
  660. * There's no point calculating the residue if there's
  661. * no txstate to store the value.
  662. */
  663. if (!txstate) {
  664. spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
  665. return ret;
  666. }
  667. vd = vchan_find_desc(&s3cchan->vc, cookie);
  668. if (vd) {
  669. /* On the issued list, so hasn't been processed yet */
  670. txd = to_s3c24xx_txd(&vd->tx);
  671. list_for_each_entry(dsg, &txd->dsg_list, node)
  672. bytes += dsg->len;
  673. } else {
  674. /*
  675. * Currently running, so sum over the pending sg's and
  676. * the currently active one.
  677. */
  678. txd = s3cchan->at;
  679. dsg = list_entry(txd->at, struct s3c24xx_sg, node);
  680. list_for_each_entry_from(dsg, &txd->dsg_list, node)
  681. bytes += dsg->len;
  682. bytes += s3c24xx_dma_getbytes_chan(s3cchan);
  683. }
  684. spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
  685. /*
  686. * This cookie is not complete yet.
  687. * Get number of bytes left in the active transactions and queue
  688. */
  689. dma_set_residue(txstate, bytes);
  690. /* Whether waiting or running, we're in progress */
  691. return ret;
  692. }
  693. /*
  694. * Initialize a descriptor to be used by memcpy submit
  695. */
  696. static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
  697. struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
  698. size_t len, unsigned long flags)
  699. {
  700. struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
  701. struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
  702. struct s3c24xx_txd *txd;
  703. struct s3c24xx_sg *dsg;
  704. int src_mod, dest_mod;
  705. dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n",
  706. len, s3cchan->name);
  707. if ((len & S3C24XX_DCON_TC_MASK) != len) {
  708. dev_err(&s3cdma->pdev->dev, "memcpy size %zu too large\n", len);
  709. return NULL;
  710. }
  711. txd = s3c24xx_dma_get_txd();
  712. if (!txd)
  713. return NULL;
  714. dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
  715. if (!dsg) {
  716. s3c24xx_dma_free_txd(txd);
  717. return NULL;
  718. }
  719. list_add_tail(&dsg->node, &txd->dsg_list);
  720. dsg->src_addr = src;
  721. dsg->dst_addr = dest;
  722. dsg->len = len;
  723. /*
  724. * Determine a suitable transfer width.
  725. * The DMA controller cannot fetch/store information which is not
  726. * naturally aligned on the bus, i.e., a 4 byte fetch must start at
  727. * an address divisible by 4 - more generally addr % width must be 0.
  728. */
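/*
 * A short worked example (addresses and length made up for illustration):
 * src = 0x30000002, dest = 0x30001000, len = 6. Then len % 4 == 2,
 * src % 4 == 2 and dest % 4 == 0, so the halfword case below applies,
 * txd->width becomes 2 and the transfer count later programmed into
 * DCON is len / 2 = 3.
 */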
  729. src_mod = src % 4;
  730. dest_mod = dest % 4;
  731. switch (len % 4) {
  732. case 0:
  733. txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1;
  734. break;
  735. case 2:
  736. txd->width = ((src_mod == 2 || src_mod == 0) &&
  737. (dest_mod == 2 || dest_mod == 0)) ? 2 : 1;
  738. break;
  739. default:
  740. txd->width = 1;
  741. break;
  742. }
  743. txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT;
  744. txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT;
  745. txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK |
  746. S3C24XX_DCON_SERV_WHOLE;
  747. return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
  748. }
  749. static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
  750. struct dma_chan *chan, struct scatterlist *sgl,
  751. unsigned int sg_len, enum dma_transfer_direction direction,
  752. unsigned long flags, void *context)
  753. {
  754. struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
  755. struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
  756. const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
  757. struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
  758. struct s3c24xx_txd *txd;
  759. struct s3c24xx_sg *dsg;
  760. struct scatterlist *sg;
  761. dma_addr_t slave_addr;
  762. u32 hwcfg = 0;
  763. int tmp;
  764. dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n",
  765. sg_dma_len(sgl), s3cchan->name);
  766. txd = s3c24xx_dma_get_txd();
  767. if (!txd)
  768. return NULL;
  769. if (cdata->handshake)
  770. txd->dcon |= S3C24XX_DCON_HANDSHAKE;
  771. switch (cdata->bus) {
  772. case S3C24XX_DMA_APB:
  773. txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
  774. hwcfg |= S3C24XX_DISRCC_LOC_APB;
  775. break;
  776. case S3C24XX_DMA_AHB:
  777. txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
  778. hwcfg |= S3C24XX_DISRCC_LOC_AHB;
  779. break;
  780. }
  781. /*
  782. * Always assume our peripheral destination is a fixed
  783. * address in memory.
  784. */
  785. hwcfg |= S3C24XX_DISRCC_INC_FIXED;
  786. /*
  787. * Individual dma operations are requested by the slave,
  788. * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
  789. */
  790. txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
  791. if (direction == DMA_MEM_TO_DEV) {
  792. txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
  793. S3C24XX_DISRCC_INC_INCREMENT;
  794. txd->didstc = hwcfg;
  795. slave_addr = s3cchan->cfg.dst_addr;
  796. txd->width = s3cchan->cfg.dst_addr_width;
  797. } else if (direction == DMA_DEV_TO_MEM) {
  798. txd->disrcc = hwcfg;
  799. txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
  800. S3C24XX_DIDSTC_INC_INCREMENT;
  801. slave_addr = s3cchan->cfg.src_addr;
  802. txd->width = s3cchan->cfg.src_addr_width;
  803. } else {
  804. s3c24xx_dma_free_txd(txd);
  805. dev_err(&s3cdma->pdev->dev,
  806. "direction %d unsupported\n", direction);
  807. return NULL;
  808. }
  809. for_each_sg(sgl, sg, sg_len, tmp) {
  810. dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
  811. if (!dsg) {
  812. s3c24xx_dma_free_txd(txd);
  813. return NULL;
  814. }
  815. list_add_tail(&dsg->node, &txd->dsg_list);
  816. dsg->len = sg_dma_len(sg);
  817. if (direction == DMA_MEM_TO_DEV) {
  818. dsg->src_addr = sg_dma_address(sg);
  819. dsg->dst_addr = slave_addr;
  820. } else { /* DMA_DEV_TO_MEM */
  821. dsg->src_addr = slave_addr;
  822. dsg->dst_addr = sg_dma_address(sg);
  823. }
  825. }
  826. return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
  827. }
  828. /*
  829. * Slave transactions callback to the slave device to allow
  830. * synchronization of slave DMA signals with the DMAC enable
  831. */
  832. static void s3c24xx_dma_issue_pending(struct dma_chan *chan)
  833. {
  834. struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
  835. unsigned long flags;
  836. spin_lock_irqsave(&s3cchan->vc.lock, flags);
  837. if (vchan_issue_pending(&s3cchan->vc)) {
  838. if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING)
  839. s3c24xx_dma_phy_alloc_and_start(s3cchan);
  840. }
  841. spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
  842. }
  843. /*
  844. * Bringup and teardown
  845. */
  846. /*
  847. * Initialise the DMAC memcpy/slave channels.
  848. * Make a local wrapper to hold required data
  849. */
  850. static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
  851. struct dma_device *dmadev, unsigned int channels, bool slave)
  852. {
  853. struct s3c24xx_dma_chan *chan;
  854. int i;
  855. INIT_LIST_HEAD(&dmadev->channels);
  856. /*
  857. * Register as many memcpy channels as we have physical channels,
  858. * we won't always be able to use all but the code will have
  859. * to cope with that situation.
  860. */
  861. for (i = 0; i < channels; i++) {
  862. chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
  863. if (!chan) {
  864. dev_err(dmadev->dev,
  865. "%s no memory for channel\n", __func__);
  866. return -ENOMEM;
  867. }
  868. chan->id = i;
  869. chan->host = s3cdma;
  870. chan->state = S3C24XX_DMA_CHAN_IDLE;
  871. if (slave) {
  872. chan->slave = true;
  873. chan->name = kasprintf(GFP_KERNEL, "slave%d", i);
  874. if (!chan->name)
  875. return -ENOMEM;
  876. } else {
  877. chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
  878. if (!chan->name)
  879. return -ENOMEM;
  880. }
  881. dev_dbg(dmadev->dev,
  882. "initialize virtual channel \"%s\"\n",
  883. chan->name);
  884. chan->vc.desc_free = s3c24xx_dma_desc_free;
  885. vchan_init(&chan->vc, dmadev);
  886. }
  887. dev_info(dmadev->dev, "initialized %d virtual %s channels\n",
  888. i, slave ? "slave" : "memcpy");
  889. return i;
  890. }
  891. static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
  892. {
  893. struct s3c24xx_dma_chan *chan = NULL;
  894. struct s3c24xx_dma_chan *next;
  895. list_for_each_entry_safe(chan,
  896. next, &dmadev->channels, vc.chan.device_node)
  897. list_del(&chan->vc.chan.device_node);
  898. }
  899. /* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
  900. static struct soc_data soc_s3c2410 = {
  901. .stride = 0x40,
  902. .has_reqsel = false,
  903. .has_clocks = false,
  904. };
  905. /* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */
  906. static struct soc_data soc_s3c2412 = {
  907. .stride = 0x40,
  908. .has_reqsel = true,
  909. .has_clocks = true,
  910. };
  911. /* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */
  912. static struct soc_data soc_s3c2443 = {
  913. .stride = 0x100,
  914. .has_reqsel = true,
  915. .has_clocks = true,
  916. };
  917. static struct platform_device_id s3c24xx_dma_driver_ids[] = {
  918. {
  919. .name = "s3c2410-dma",
  920. .driver_data = (kernel_ulong_t)&soc_s3c2410,
  921. }, {
  922. .name = "s3c2412-dma",
  923. .driver_data = (kernel_ulong_t)&soc_s3c2412,
  924. }, {
  925. .name = "s3c2443-dma",
  926. .driver_data = (kernel_ulong_t)&soc_s3c2443,
  927. },
  928. { },
  929. };
  930. static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev)
  931. {
  932. return (struct soc_data *)
  933. platform_get_device_id(pdev)->driver_data;
  934. }
  935. static int s3c24xx_dma_probe(struct platform_device *pdev)
  936. {
  937. const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
  938. struct s3c24xx_dma_engine *s3cdma;
  939. struct soc_data *sdata;
  940. struct resource *res;
  941. int ret;
  942. int i;
  943. if (!pdata) {
  944. dev_err(&pdev->dev, "platform data missing\n");
  945. return -ENODEV;
  946. }
  947. /* Basic sanity check */
  948. if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
  949. dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
  950. pdata->num_phy_channels, MAX_DMA_CHANNELS);
  951. return -EINVAL;
  952. }
  953. sdata = s3c24xx_dma_get_soc_data(pdev);
  954. if (!sdata)
  955. return -EINVAL;
  956. s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL);
  957. if (!s3cdma)
  958. return -ENOMEM;
  959. s3cdma->pdev = pdev;
  960. s3cdma->pdata = pdata;
  961. s3cdma->sdata = sdata;
  962. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  963. s3cdma->base = devm_ioremap_resource(&pdev->dev, res);
  964. if (IS_ERR(s3cdma->base))
  965. return PTR_ERR(s3cdma->base);
  966. s3cdma->phy_chans = devm_kzalloc(&pdev->dev,
  967. sizeof(struct s3c24xx_dma_phy) *
  968. pdata->num_phy_channels,
  969. GFP_KERNEL);
  970. if (!s3cdma->phy_chans)
  971. return -ENOMEM;
  972. /* acquire irqs and clocks for all physical channels */
  973. for (i = 0; i < pdata->num_phy_channels; i++) {
  974. struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
  975. char clk_name[6];
  976. phy->id = i;
  977. phy->base = s3cdma->base + (i * sdata->stride);
  978. phy->host = s3cdma;
  979. phy->irq = platform_get_irq(pdev, i);
  980. if (phy->irq < 0) {
  981. dev_err(&pdev->dev, "failed to get irq %d, err %d\n",
  982. i, phy->irq);
  983. continue;
  984. }
  985. ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
  986. 0, pdev->name, phy);
  987. if (ret) {
  988. dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n",
  989. i, ret);
  990. continue;
  991. }
  992. if (sdata->has_clocks) {
  993. sprintf(clk_name, "dma.%d", i);
  994. phy->clk = devm_clk_get(&pdev->dev, clk_name);
  995. if (IS_ERR(phy->clk) && sdata->has_clocks) {
  996. dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu\n",
  997. i, PTR_ERR(phy->clk));
  998. continue;
  999. }
  1000. ret = clk_prepare(phy->clk);
  1001. if (ret) {
  1002. dev_err(&pdev->dev, "clock for phy %d failed, error %d\n",
  1003. i, ret);
  1004. continue;
  1005. }
  1006. }
  1007. spin_lock_init(&phy->lock);
  1008. phy->valid = true;
  1009. dev_dbg(&pdev->dev, "physical channel %d is %s\n",
  1010. i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE");
  1011. }
  1012. /* Initialize memcpy engine */
  1013. dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
  1014. dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
  1015. s3cdma->memcpy.dev = &pdev->dev;
  1016. s3cdma->memcpy.device_alloc_chan_resources =
  1017. s3c24xx_dma_alloc_chan_resources;
  1018. s3cdma->memcpy.device_free_chan_resources =
  1019. s3c24xx_dma_free_chan_resources;
  1020. s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
  1021. s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
  1022. s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
  1023. s3cdma->memcpy.device_control = s3c24xx_dma_control;
  1024. /* Initialize slave engine for SoC internal dedicated peripherals */
  1025. dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
  1026. dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
  1027. s3cdma->slave.dev = &pdev->dev;
  1028. s3cdma->slave.device_alloc_chan_resources =
  1029. s3c24xx_dma_alloc_chan_resources;
  1030. s3cdma->slave.device_free_chan_resources =
  1031. s3c24xx_dma_free_chan_resources;
  1032. s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
  1033. s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
  1034. s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
  1035. s3cdma->slave.device_control = s3c24xx_dma_control;
  1036. /* Register as many memcpy channels as there are physical channels */
  1037. ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
  1038. pdata->num_phy_channels, false);
  1039. if (ret <= 0) {
  1040. dev_warn(&pdev->dev,
  1041. "%s failed to enumerate memcpy channels - %d\n",
  1042. __func__, ret);
  1043. goto err_memcpy;
  1044. }
  1045. /* Register slave channels */
  1046. ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave,
  1047. pdata->num_channels, true);
  1048. if (ret <= 0) {
  1049. dev_warn(&pdev->dev,
  1050. "%s failed to enumerate slave channels - %d\n",
  1051. __func__, ret);
  1052. goto err_slave;
  1053. }
  1054. ret = dma_async_device_register(&s3cdma->memcpy);
  1055. if (ret) {
  1056. dev_warn(&pdev->dev,
  1057. "%s failed to register memcpy as an async device - %d\n",
  1058. __func__, ret);
  1059. goto err_memcpy_reg;
  1060. }
  1061. ret = dma_async_device_register(&s3cdma->slave);
  1062. if (ret) {
  1063. dev_warn(&pdev->dev,
  1064. "%s failed to register slave as an async device - %d\n",
  1065. __func__, ret);
  1066. goto err_slave_reg;
  1067. }
  1068. platform_set_drvdata(pdev, s3cdma);
  1069. dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n",
  1070. pdata->num_phy_channels);
  1071. return 0;
  1072. err_slave_reg:
  1073. dma_async_device_unregister(&s3cdma->memcpy);
  1074. err_memcpy_reg:
  1075. s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
  1076. err_slave:
  1077. s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
  1078. err_memcpy:
  1079. if (sdata->has_clocks)
  1080. for (i = 0; i < pdata->num_phy_channels; i++) {
  1081. struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
  1082. if (phy->valid)
  1083. clk_unprepare(phy->clk);
  1084. }
  1085. return ret;
  1086. }
  1087. static int s3c24xx_dma_remove(struct platform_device *pdev)
  1088. {
  1089. const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
  1090. struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev);
  1091. struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev);
  1092. int i;
  1093. dma_async_device_unregister(&s3cdma->slave);
  1094. dma_async_device_unregister(&s3cdma->memcpy);
  1095. s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
  1096. s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
  1097. if (sdata->has_clocks)
  1098. for (i = 0; i < pdata->num_phy_channels; i++) {
  1099. struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
  1100. if (phy->valid)
  1101. clk_unprepare(phy->clk);
  1102. }
  1103. return 0;
  1104. }
  1105. static struct platform_driver s3c24xx_dma_driver = {
  1106. .driver = {
  1107. .name = "s3c24xx-dma",
  1108. .owner = THIS_MODULE,
  1109. },
  1110. .id_table = s3c24xx_dma_driver_ids,
  1111. .probe = s3c24xx_dma_probe,
  1112. .remove = s3c24xx_dma_remove,
  1113. };
  1114. module_platform_driver(s3c24xx_dma_driver);
  1115. bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
  1116. {
  1117. struct s3c24xx_dma_chan *s3cchan;
  1118. if (chan->device->dev->driver != &s3c24xx_dma_driver.driver)
  1119. return false;
  1120. s3cchan = to_s3c24xx_dma_chan(chan);
  1121. return s3cchan->id == (int)param;
  1122. }
  1123. EXPORT_SYMBOL(s3c24xx_dma_filter);
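/*
 * Usage sketch for the filter above, as it might appear in a hypothetical
 * client driver (illustration only; DMACH_SDI stands for whatever channel
 * id the platform data uses for the peripheral in question):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, s3c24xx_dma_filter,
 *				   (void *)DMACH_SDI);
 *	if (!chan)
 *		return -EBUSY;
 */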
  1124. MODULE_DESCRIPTION("S3C24XX DMA Driver");
  1125. MODULE_AUTHOR("Heiko Stuebner");
  1126. MODULE_LICENSE("GPL v2");