edma.c

/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS	2
#define EDMA_CHANS	32
#else
#define EDMA_CTLRS	1
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */

/*
 * Max of 20 segments per channel to conserve PaRAM slots.
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

struct edma_pset {
	u32 len;
	dma_addr_t addr;
	struct edmacc_param param;
};

struct edma_desc {
	struct virt_dma_desc vdesc;
	struct list_head node;
	enum dma_transfer_direction direction;
	int cyclic;
	int absync;
	int pset_nr;
	struct edma_chan *echan;
	int processed;

	/*
	 * The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 * so far to cover accounting. This is updated directly to processed
	 * during edma_callback and is always <= processed, because processed
	 * refers to the number of pending transfers (programmed to the EDMA
	 * controller), whereas processed_stat tracks the number of transfers
	 * accounted for so far.
	 *
	 * - residue: The number of bytes we have left to transfer for this desc
	 *
	 * - residue_stat: The residue in bytes of data we have covered
	 * so far for accounting. This is updated directly to residue
	 * during callbacks to keep it current.
	 *
	 * - sg_len: Tracks the length of the current intermediate transfer,
	 * this is required to update the residue during intermediate transfer
	 * completion callback.
	 */
	int processed_stat;
	u32 sg_len;
	u32 residue;
	u32 residue_stat;

	struct edma_pset pset[0];
};

struct edma_cc;

struct edma_chan {
	struct virt_dma_chan vchan;
	struct list_head node;
	struct edma_desc *edesc;
	struct edma_cc *ecc;
	int ch_num;
	bool alloced;
	int slot[EDMA_MAX_SLOTS];
	int missed;
	struct dma_slave_config cfg;
};

struct edma_cc {
	int ctlr;
	struct dma_device dma_slave;
	struct edma_chan slave_chans[EDMA_CHANS];
	int num_slave_chans;
	int dummy_slot;
};

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc
	*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	/* If either we processed all psets or we're still not started */
	if (!echan->edesc ||
	    echan->edesc->pset_nr == echan->edesc->processed) {
		/* Get next vdesc */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc) {
			echan->edesc = NULL;
			return;
		}
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
	edesc->sg_len = 0;

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
		dev_vdbg(echan->vchan.chan.device->dev,
			"\n pset[%d]:\n"
			" chnum\t%d\n"
			" slot\t%d\n"
			" opt\t%08x\n"
			" src\t%08x\n"
			" dst\t%08x\n"
			" abcnt\t%08x\n"
			" ccnt\t%08x\n"
			" bidx\t%08x\n"
			" cidx\t%08x\n"
			" lkrld\t%08x\n",
			j, echan->ch_num, echan->slot[i],
			edesc->pset[j].param.opt,
			edesc->pset[j].param.src,
			edesc->pset[j].param.dst,
			edesc->pset[j].param.a_b_cnt,
			edesc->pset[j].param.ccnt,
			edesc->pset[j].param.src_dst_bidx,
			edesc->pset[j].param.src_dst_cidx,
			edesc->pset[j].param.link_bcntrld);
		/* Link to the next slot if this is not the last set */
		if (i != (nslots - 1))
			edma_link(echan->slot[i], echan->slot[i+1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a series of SG-list transactions, set
	 * up a link to the dummy slot; this results in all future events
	 * being absorbed, and that's OK because we're done.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(echan->slot[nslots-1], echan->slot[1]);
		else
			edma_link(echan->slot[nslots-1],
				  echan->ecc->dummy_slot);
	}

	if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(echan->ch_num);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(echan->ch_num);
	}

	/*
	 * This happens due to setup times between intermediate transfers
	 * in long SG lists which have to be broken up into transfers of
	 * MAX_NR_SG
	 */
	if (echan->missed) {
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(echan->ch_num);
		edma_stop(echan->ch_num);
		edma_start(echan->ch_num);
		edma_trigger_channel(echan->ch_num);
		echan->missed = 0;
	}
}

static int edma_terminate_all(struct edma_chan *echan)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after this function returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		int cyclic = echan->edesc->cyclic;

		echan->edesc = NULL;
		edma_stop(echan->ch_num);
		/* Move the cyclic channel back to the default queue */
		if (cyclic)
			edma_assign_channel_eventq(echan->ch_num,
						   EVENTQ_DEFAULT);
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}

static int edma_slave_config(struct edma_chan *echan,
	struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
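
/*
 * For illustration (not part of the driver): a peripheral driver would
 * typically fill in a struct dma_slave_config and hand it to the dmaengine
 * core, which routes it here via DMA_SLAVE_CONFIG. A minimal sketch,
 * assuming a hypothetical FIFO address 'fifo_phys' and an already
 * requested channel 'chan':
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */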

static int edma_dma_pause(struct edma_chan *echan)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!echan->edesc || !echan->edesc->cyclic)
		return -EINVAL;

	edma_pause(echan->ch_num);
	return 0;
}

static int edma_dma_resume(struct edma_chan *echan)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!echan->edesc || !echan->edesc->cyclic)
		return -EINVAL;

	edma_resume(echan->ch_num);
	return 0;
}

static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			unsigned long arg)
{
	int ret = 0;
	struct dma_slave_config *config;
	struct edma_chan *echan = to_edma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		edma_terminate_all(echan);
		break;
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		ret = edma_slave_config(echan, config);
		break;
	case DMA_PAUSE:
		ret = edma_dma_pause(echan);
		break;
	case DMA_RESUME:
		ret = edma_dma_resume(echan);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and set up.
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: Burst size, in units of dev_width
 * @dev_width: Width of the slave device's data bus, in bytes
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
	dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
	enum dma_slave_buswidth dev_width, unsigned int dma_length,
	enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int acnt, bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	acnt = dev_width;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: In A-sync transfers only, bcntrld is used, but it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case, the approach adopted is: bcnt for the
		 * first frame will be the remainder below. Then for
		 * every successive frame, bcnt will be SZ_64K-1. This
		 * is ensured by setting bcntrld to 0xffff at the end of
		 * this function.
		 */
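		/*
		 * Worked example (illustrative numbers only): for
		 * dma_length = 128 KiB and acnt = 1, dma_length / acnt is
		 * 131072, so ccnt = 131072 / 65535 = 2 and
		 * bcnt = 131072 - 2 * 65535 = 2. Since bcnt != 0, ccnt is
		 * bumped to 3: the first frame moves 2 bytes and, after the
		 * bcnt reload, the remaining two frames move 65535 bytes
		 * each (2 + 65535 + 65535 = 131072).
		 */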
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	epset->len = dma_length;

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
		epset->addr = src_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = dst_addr;
	} else if (direction == DMA_MEM_TO_MEM) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	param->ccnt = ccnt;
	/*
	 * The only time (bcntrld) auto reload is required is for the A-sync
	 * case, and there the reload value is always SZ_64K-1, which is what
	 * is programmed here. 'link' is initially set to the no-link value
	 * and will be populated later by edma_execute.
	 */
	param->link_bcntrld = 0xffffffff;
	return absync;
}

static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->pset_nr = sg_len;
	edesc->residue = 0;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		/*
		 * If this is the last element of the current SG set of
		 * transactions, enable interrupts so that the next set
		 * can be processed.
		 */
		if (!((i+1) % MAX_NR_SG))
			edesc->pset[i].param.opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].param.opt |= TCINTEN;
	}
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);

	if (unlikely(!echan || !len))
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = 1;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	/*
	 * Enable intermediate transfer chaining to re-trigger channel
	 * on completion of every TR, and enable transfer-completion
	 * interrupt on completion of the whole transfer.
	 */
	edesc->pset[0].param.opt |= ITCCHEN;
	edesc->pset[0].param.opt |= TCINTEN;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + nslots *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = buf_len;
	edesc->direction = direction;
	edesc->echan = echan;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			"\n pset[%d]:\n"
			" chnum\t%d\n"
			" slot\t%d\n"
			" opt\t%08x\n"
			" src\t%08x\n"
			" dst\t%08x\n"
			" abcnt\t%08x\n"
			" ccnt\t%08x\n"
			" bidx\t%08x\n"
			" cidx\t%08x\n"
			" lkrld\t%08x\n",
			i, echan->ch_num, echan->slot[i],
			edesc->pset[i].param.opt,
			edesc->pset[i].param.src,
			edesc->pset[i].param.dst,
			edesc->pset[i].param.a_b_cnt,
			edesc->pset[i].param.ccnt,
			edesc->pset[i].param.src_dst_bidx,
			edesc->pset[i].param.src_dst_cidx,
			edesc->pset[i].param.link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable period interrupt only if it is requested
		 */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[i].param.opt |= TCINTEN;
	}

	/* Place the cyclic channel in the highest priority queue */
	edma_assign_channel_eventq(echan->ch_num, EVENTQ_0);

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	struct edmacc_param p;

	edesc = echan->edesc;

	/* Pause the channel for non-cyclic */
	if (!edesc || !edesc->cyclic)
		edma_pause(echan->ch_num);

	switch (ch_status) {
	case EDMA_DMA_COMPLETE:
		spin_lock(&echan->vchan.lock);

		if (edesc) {
			if (edesc->cyclic) {
				vchan_cyclic_callback(&edesc->vdesc);
			} else if (edesc->processed == edesc->pset_nr) {
				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
				edesc->residue = 0;
				edma_stop(echan->ch_num);
				vchan_cookie_complete(&edesc->vdesc);
				edma_execute(echan);
			} else {
				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);

				/* Update statistics for tx_status */
				edesc->residue -= edesc->sg_len;
				edesc->residue_stat = edesc->residue;
				edesc->processed_stat = edesc->processed;

				edma_execute(echan);
			}
		}

		spin_unlock(&echan->vchan.lock);

		break;
	case EDMA_DMA_CC_ERROR:
		spin_lock(&echan->vchan.lock);

		edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);

		/*
		 * Re-issue later based on the missed flag, which is
		 * guaranteed to happen because:
		 * (1) we finished transmitting an intermediate slot and
		 * edma_execute is coming up, or
		 * (2) we finished the current transfer and issue will
		 * call edma_execute.
		 *
		 * Important note: issuing can be dangerous here and
		 * lead to some nasty recursion when we are in a NULL
		 * slot. So we avoid doing so and set the missed flag.
		 */
		if (p.a_b_cnt == 0 && p.ccnt == 0) {
			dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
			echan->missed = 1;
		} else {
			/*
			 * The slot is already programmed but the event got
			 * missed, so it's safe to issue it here.
			 */
			dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
			edma_clean_channel(echan->ch_num);
			edma_stop(echan->ch_num);
			edma_start(echan->ch_num);
			edma_trigger_channel(echan->ch_num);
		}

		spin_unlock(&echan->vchan.lock);

		break;
	default:
		break;
	}
}

/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;
	LIST_HEAD(descs);

	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
				      chan, EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(a_ch_num);
err_no_chan:
	return ret;
}

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ch_num);
		echan->alloced = false;
	}

	dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}

static u32 edma_residue(struct edma_desc *edesc)
{
	bool dst = edesc->direction == DMA_DEV_TO_MEM;
	struct edma_pset *pset = edesc->pset;
	dma_addr_t done, pos;
	int i;

	/*
	 * We always read the dst/src position from the first PaRAM
	 * slot. That's the one which is active now.
	 */
	pos = edma_get_position(edesc->echan->slot[0], dst);

	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
	 *
	 * We never update edesc->residue in the cyclic case, so we
	 * can tell the remaining room to the end of the circular
	 * buffer.
	 */
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;
	}

	/*
	 * For SG operation we catch up with the last processed
	 * status.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;
	}
	return edesc->residue_stat;
}

/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
		txstate->residue = edma_residue(echan->edesc);
	else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}

static void __init edma_chan_init(struct edma_cc *ecc,
				  struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < EDMA_CHANS; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}

#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static int edma_dma_device_slave_caps(struct dma_chan *dchan,
				      struct dma_slave_caps *caps)
{
	caps->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = true;
	caps->cmd_terminate = true;
	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	return 0;
}

static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_control = edma_control;
	dma->device_slave_caps = edma_dma_device_slave_caps;
	dma->dev = dev;

	/*
	 * Code using dma memcpy must make sure the length is aligned to
	 * the dma->copy_align boundary.
	 */
	dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES;

	INIT_LIST_HEAD(&dma->channels);
}

static int edma_probe(struct platform_device *pdev)
{
	struct edma_cc *ecc;
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->ctlr = pdev->id;
	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return ecc->dummy_slot;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	platform_set_drvdata(pdev, ecc);

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc->dummy_slot);
	return ret;
}

static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc->dummy_slot);

	return 0;
}

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma-dma-engine",
		.owner	= THIS_MODULE,
	},
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
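
/*
 * For illustration (not part of the driver): a client would typically use
 * the filter above with dma_request_channel() to claim a specific EDMA
 * channel. A minimal sketch, assuming a hypothetical channel id 'ch_num'
 * supplied by platform data:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch_num);
 *	if (!chan)
 *		return -ENODEV;
 */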

static struct platform_device *pdev0, *pdev1;

static const struct platform_device_info edma_dev_info0 = {
	.name = "edma-dma-engine",
	.id = 0,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct platform_device_info edma_dev_info1 = {
	.name = "edma-dma-engine",
	.id = 1,
	.dma_mask = DMA_BIT_MASK(32),
};

static int edma_init(void)
{
	int ret = platform_driver_register(&edma_driver);

	if (ret == 0) {
		pdev0 = platform_device_register_full(&edma_dev_info0);
		if (IS_ERR(pdev0)) {
			platform_driver_unregister(&edma_driver);
			ret = PTR_ERR(pdev0);
			goto out;
		}
	}

	if (ret == 0 && EDMA_CTLRS == 2) {
		pdev1 = platform_device_register_full(&edma_dev_info1);
		if (IS_ERR(pdev1)) {
			platform_driver_unregister(&edma_driver);
			platform_device_unregister(pdev0);
			ret = PTR_ERR(pdev1);
		}
	}

out:
	return ret;
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_device_unregister(pdev0);
	if (pdev1)
		platform_device_unregister(pdev1);
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");