at_hdmac.c

  1. /*
  2. * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
  3. *
  4. * Copyright (C) 2008 Atmel Corporation
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. *
  12. * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
  13. * The only Atmel DMA Controller that is not covered by this driver is the one
  14. * found on AT91SAM9263.
  15. */
  16. #include <dt-bindings/dma/at91.h>
  17. #include <linux/clk.h>
  18. #include <linux/dmaengine.h>
  19. #include <linux/dma-mapping.h>
  20. #include <linux/dmapool.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/module.h>
  23. #include <linux/platform_device.h>
  24. #include <linux/slab.h>
  25. #include <linux/of.h>
  26. #include <linux/of_device.h>
  27. #include <linux/of_dma.h>
  28. #include "at_hdmac_regs.h"
  29. #include "dmaengine.h"
  30. /*
  31. * Glossary
  32. * --------
  33. *
  34. * at_hdmac : Name of the Atmel AHB DMA Controller
  35. * at_dma_ / atdma : Atmel DMA controller entity related
  36. * atc_ / atchan : Atmel DMA channel entity related
  37. */
  38. #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
  39. #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
  40. |ATC_DIF(AT_DMA_MEM_IF))
  41. #define ATC_DMA_BUSWIDTHS\
  42. (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
  43. BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
  44. BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
  45. BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
  46. #define ATC_MAX_DSCR_TRIALS 10
  47. /*
  48. * Initial number of descriptors to allocate for each channel. This could
  49. * be increased during dma usage.
  50. */
  51. static unsigned int init_nr_desc_per_channel = 64;
  52. module_param(init_nr_desc_per_channel, uint, 0644);
  53. MODULE_PARM_DESC(init_nr_desc_per_channel,
  54. "initial descriptors per channel (default: 64)");
  55. /* prototypes */
  56. static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
  57. static void atc_issue_pending(struct dma_chan *chan);
  58. /*----------------------------------------------------------------------*/
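/*
 * atc_get_xfer_width - pick the widest transfer width usable for a transfer.
 * Returns the register encoding of the width that keeps @src, @dst and @len
 * aligned: 2 (32-bit word), 1 (halfword) or 0 (byte). Callers shift byte
 * lengths by this value to convert them into transfer units.
 */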
  59. static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
  60. size_t len)
  61. {
  62. unsigned int width;
  63. if (!((src | dst | len) & 3))
  64. width = 2;
  65. else if (!((src | dst | len) & 1))
  66. width = 1;
  67. else
  68. width = 0;
  69. return width;
  70. }
  71. static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
  72. {
  73. return list_first_entry(&atchan->active_list,
  74. struct at_desc, desc_node);
  75. }
  76. static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
  77. {
  78. return list_first_entry(&atchan->queue,
  79. struct at_desc, desc_node);
  80. }
  81. /**
  82. * atc_alloc_descriptor - allocate and return an initialized descriptor
  83. * @chan: the channel to allocate descriptors for
  84. * @gfp_flags: GFP allocation flags
  85. *
  86. * Note: The ACK bit is set in the descriptor flags at creation time
  87. * to make the initial allocation more convenient. This bit will be
  88. * cleared and control will be handed back to the client at usage time
  89. * (in the prep_* functions).
  90. */
  91. static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
  92. gfp_t gfp_flags)
  93. {
  94. struct at_desc *desc = NULL;
  95. struct at_dma *atdma = to_at_dma(chan->device);
  96. dma_addr_t phys;
  97. desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
  98. if (desc) {
  99. INIT_LIST_HEAD(&desc->tx_list);
  100. dma_async_tx_descriptor_init(&desc->txd, chan);
  101. /* txd.flags will be overwritten in prep functions */
  102. desc->txd.flags = DMA_CTRL_ACK;
  103. desc->txd.tx_submit = atc_tx_submit;
  104. desc->txd.phys = phys;
  105. }
  106. return desc;
  107. }
  108. /**
  109. * atc_desc_get - get an unused descriptor from free_list
  110. * @atchan: channel we want a new descriptor for
  111. */
  112. static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
  113. {
  114. struct at_desc *desc, *_desc;
  115. struct at_desc *ret = NULL;
  116. unsigned long flags;
  117. unsigned int i = 0;
  118. LIST_HEAD(tmp_list);
  119. spin_lock_irqsave(&atchan->lock, flags);
  120. list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
  121. i++;
  122. if (async_tx_test_ack(&desc->txd)) {
  123. list_del(&desc->desc_node);
  124. ret = desc;
  125. break;
  126. }
  127. dev_dbg(chan2dev(&atchan->chan_common),
  128. "desc %p not ACKed\n", desc);
  129. }
  130. spin_unlock_irqrestore(&atchan->lock, flags);
  131. dev_vdbg(chan2dev(&atchan->chan_common),
  132. "scanned %u descriptors on freelist\n", i);
  133. /* no descriptor available in the initial pool: allocate one more */
  134. if (!ret) {
  135. ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
  136. if (ret) {
  137. spin_lock_irqsave(&atchan->lock, flags);
  138. atchan->descs_allocated++;
  139. spin_unlock_irqrestore(&atchan->lock, flags);
  140. } else {
  141. dev_err(chan2dev(&atchan->chan_common),
  142. "not enough descriptors available\n");
  143. }
  144. }
  145. return ret;
  146. }
  147. /**
  148. * atc_desc_put - move a descriptor, including any children, to the free list
  149. * @atchan: channel we work on
  150. * @desc: descriptor, at the head of a chain, to move to free list
  151. */
  152. static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
  153. {
  154. if (desc) {
  155. struct at_desc *child;
  156. unsigned long flags;
  157. spin_lock_irqsave(&atchan->lock, flags);
  158. list_for_each_entry(child, &desc->tx_list, desc_node)
  159. dev_vdbg(chan2dev(&atchan->chan_common),
  160. "moving child desc %p to freelist\n",
  161. child);
  162. list_splice_init(&desc->tx_list, &atchan->free_list);
  163. dev_vdbg(chan2dev(&atchan->chan_common),
  164. "moving desc %p to freelist\n", desc);
  165. list_add(&desc->desc_node, &atchan->free_list);
  166. spin_unlock_irqrestore(&atchan->lock, flags);
  167. }
  168. }
  169. /**
  170. * atc_desc_chain - build chain adding a descriptor
  171. * @first: address of first descriptor of the chain
  172. * @prev: address of previous descriptor of the chain
  173. * @desc: descriptor to queue
  174. *
  175. * Called from prep_* functions
  176. */
  177. static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
  178. struct at_desc *desc)
  179. {
  180. if (!(*first)) {
  181. *first = desc;
  182. } else {
  183. /* inform the HW lli about chaining */
  184. (*prev)->lli.dscr = desc->txd.phys;
  185. /* insert the link descriptor to the LD ring */
  186. list_add_tail(&desc->desc_node,
  187. &(*first)->tx_list);
  188. }
  189. *prev = desc;
  190. }
  191. /**
  192. * atc_dostart - starts the DMA engine for real
  193. * @atchan: the channel we want to start
  194. * @first: first descriptor in the list we want to begin with
  195. *
  196. * Called with atchan->lock held and bh disabled
  197. */
  198. static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
  199. {
  200. struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
  201. /* ASSERT: channel is idle */
  202. if (atc_chan_is_enabled(atchan)) {
  203. dev_err(chan2dev(&atchan->chan_common),
  204. "BUG: Attempted to start non-idle channel\n");
  205. dev_err(chan2dev(&atchan->chan_common),
  206. " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
  207. channel_readl(atchan, SADDR),
  208. channel_readl(atchan, DADDR),
  209. channel_readl(atchan, CTRLA),
  210. channel_readl(atchan, CTRLB),
  211. channel_readl(atchan, DSCR));
  212. /* The tasklet will hopefully advance the queue... */
  213. return;
  214. }
  215. vdbg_dump_regs(atchan);
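/*
 * Clear the per-channel registers and point DSCR at the first hardware
 * linked-list item, so that the controller fetches the actual transfer
 * parameters from that LLI once the channel is enabled via CHER below.
 */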
  216. channel_writel(atchan, SADDR, 0);
  217. channel_writel(atchan, DADDR, 0);
  218. channel_writel(atchan, CTRLA, 0);
  219. channel_writel(atchan, CTRLB, 0);
  220. channel_writel(atchan, DSCR, first->txd.phys);
  221. channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
  222. ATC_SPIP_BOUNDARY(first->boundary));
  223. channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
  224. ATC_DPIP_BOUNDARY(first->boundary));
  225. dma_writel(atdma, CHER, atchan->mask);
  226. vdbg_dump_regs(atchan);
  227. }
  228. /*
  229. * atc_get_desc_by_cookie - get the descriptor of a cookie
  230. * @atchan: the DMA channel
  231. * @cookie: the cookie to get the descriptor for
  232. */
  233. static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
  234. dma_cookie_t cookie)
  235. {
  236. struct at_desc *desc, *_desc;
  237. list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
  238. if (desc->txd.cookie == cookie)
  239. return desc;
  240. }
  241. list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
  242. if (desc->txd.cookie == cookie)
  243. return desc;
  244. }
  245. return NULL;
  246. }
  247. /**
  248. * atc_calc_bytes_left - calculates the number of bytes left according to the
  249. * value read from CTRLA.
  250. *
  251. * @current_len: the number of bytes left before reading CTRLA
  252. * @ctrla: the value of CTRLA
  253. */
  254. static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
  255. {
  256. u32 btsize = (ctrla & ATC_BTSIZE_MAX);
  257. u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
  258. /*
  259. * According to the datasheet, when reading the Control A Register
  260. * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
  261. * number of transfers completed on the Source Interface.
  262. * So btsize is always a number of source width transfers.
  263. */
  264. return current_len - (btsize << src_width);
  265. }
  266. /**
  267. * atc_get_bytes_left - get the number of bytes residue for a cookie
  268. * @chan: DMA channel
  269. * @cookie: transaction identifier to check status of
  270. */
  271. static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
  272. {
  273. struct at_dma_chan *atchan = to_at_dma_chan(chan);
  274. struct at_desc *desc_first = atc_first_active(atchan);
  275. struct at_desc *desc;
  276. int ret;
  277. u32 ctrla, dscr, trials;
  278. /*
  279. * If the cookie doesn't match the currently running transfer then
  280. * we can return the total length of the associated DMA transfer,
  281. * because it is still queued.
  282. */
  283. desc = atc_get_desc_by_cookie(atchan, cookie);
  284. if (desc == NULL)
  285. return -EINVAL;
  286. else if (desc != desc_first)
  287. return desc->total_len;
  288. /* cookie matches the currently running transfer */
  289. ret = desc_first->total_len;
  290. if (desc_first->lli.dscr) {
  291. /* hardware linked list transfer */
  292. /*
  293. * Calculate the residue by removing the length of the child
  294. * descriptors already transferred from the total length.
  295. * To get the current child descriptor we can use the value of
  296. * the channel's DSCR register and compare it against the value
  297. * of the hardware linked list structure of each child
  298. * descriptor.
  299. *
  300. * The CTRLA register provides us with the amount of data
  301. * already read from the source for the current child
  302. * descriptor. So we can compute a more accurate residue by also
  303. * removing the number of bytes corresponding to this amount of
  304. * data.
  305. *
  306. * However, the DSCR and CTRLA registers cannot both be read
  307. * atomically. Hence a race condition may occur: the first register
  308. * read may refer to one child descriptor whereas the second
  309. * read may refer to a later child descriptor in the list
  310. * because the DMA transfer progresses in between the two
  311. * reads.
  312. *
  313. * One solution could have been to pause the DMA transfer, read
  314. * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
  315. * this approach presents some drawbacks:
  316. * - If the DMA transfer is paused, RX overruns or TX underruns
  317. * are more likely to occur depending on the system latency.
  318. * Taking the USART driver as an example, it uses a cyclic DMA
  319. * transfer to read data from the Receive Holding Register
  320. * (RHR) to avoid RX overruns since the RHR is not protected
  321. * by any FIFO on most Atmel SoCs. So pausing the DMA transfer
  322. * to compute the residue would break the USART driver design.
  323. * - The atc_pause() function masks interrupts but we would rather
  324. * avoid doing so for system latency reasons.
  325. *
  326. * So we use another approach instead: the DSCR is read a
  327. * first time, then CTRLA is read, and finally the DSCR is read
  328. * a second time. If the two consecutive DSCR values read
  329. * are the same, we assume both refer to the very same
  330. * child descriptor, as does the CTRLA value read in between.
  331. * For cyclic transfers, the assumption is that a full loop
  332. * is "not so fast".
  333. * If the two DSCR values differ, we read the CTRLA again,
  334. * then the DSCR, until two consecutive DSCR read values are
  335. * equal or the maximum number of trials is reached.
  336. * This algorithm is very unlikely to fail to find a stable
  337. * value for DSCR.
  338. */
  339. dscr = channel_readl(atchan, DSCR);
  340. rmb(); /* ensure DSCR is read before CTRLA */
  341. ctrla = channel_readl(atchan, CTRLA);
  342. for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
  343. u32 new_dscr;
  344. rmb(); /* ensure DSCR is read after CTRLA */
  345. new_dscr = channel_readl(atchan, DSCR);
  346. /*
  347. * If the DSCR register value has not changed inside the
  348. * DMA controller since the previous read, we assume
  349. * that both the dscr and ctrla values refer to the
  350. * very same descriptor.
  351. */
  352. if (likely(new_dscr == dscr))
  353. break;
  354. /*
  355. * DSCR has changed inside the DMA controller, so the
  356. * previously read value of CTRLA may refer to an already
  357. * processed descriptor and hence could be outdated.
  358. * We need to update ctrla to match the current
  359. * descriptor.
  360. */
  361. dscr = new_dscr;
  362. rmb(); /* ensure DSCR is read before CTRLA */
  363. ctrla = channel_readl(atchan, CTRLA);
  364. }
  365. if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
  366. return -ETIMEDOUT;
  367. /* for the first descriptor we can be more accurate */
  368. if (desc_first->lli.dscr == dscr)
  369. return atc_calc_bytes_left(ret, ctrla);
  370. ret -= desc_first->len;
  371. list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
  372. if (desc->lli.dscr == dscr)
  373. break;
  374. ret -= desc->len;
  375. }
  376. /*
  377. * For the current descriptor in the chain we can calculate
  378. * the remaining bytes using the channel's register.
  379. */
  380. ret = atc_calc_bytes_left(ret, ctrla);
  381. } else {
  382. /* single transfer */
  383. ctrla = channel_readl(atchan, CTRLA);
  384. ret = atc_calc_bytes_left(ret, ctrla);
  385. }
  386. return ret;
  387. }
  388. /**
  389. * atc_chain_complete - finish work for one transaction chain
  390. * @atchan: channel we work on
  391. * @desc: descriptor at the head of the chain we want do complete
  392. *
  393. * Called with atchan->lock held and bh disabled */
  394. static void
  395. atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
  396. {
  397. struct dma_async_tx_descriptor *txd = &desc->txd;
  398. struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
  399. dev_vdbg(chan2dev(&atchan->chan_common),
  400. "descriptor %u complete\n", txd->cookie);
  401. /* mark the descriptor as complete for non cyclic cases only */
  402. if (!atc_chan_is_cyclic(atchan))
  403. dma_cookie_complete(txd);
  404. /* If the transfer was a memset, free our temporary buffer */
  405. if (desc->memset_buffer) {
  406. dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
  407. desc->memset_paddr);
  408. desc->memset_buffer = false;
  409. }
  410. /* move children to free_list */
  411. list_splice_init(&desc->tx_list, &atchan->free_list);
  412. /* move myself to free_list */
  413. list_move(&desc->desc_node, &atchan->free_list);
  414. dma_descriptor_unmap(txd);
  415. /* for cyclic transfers,
  416. * no need to replay callback function while stopping */
  417. if (!atc_chan_is_cyclic(atchan)) {
  418. /*
  419. * The API requires that no submissions are done from a
  420. * callback, so we don't need to drop the lock here
  421. */
  422. dmaengine_desc_get_callback_invoke(txd, NULL);
  423. }
  424. dma_run_dependencies(txd);
  425. }
  426. /**
  427. * atc_complete_all - finish work for all transactions
  428. * @atchan: channel to complete transactions for
  429. *
  430. * Submit queued descriptors, if any
  431. *
  432. * Assume channel is idle while calling this function
  433. * Called with atchan->lock held and bh disabled
  434. */
  435. static void atc_complete_all(struct at_dma_chan *atchan)
  436. {
  437. struct at_desc *desc, *_desc;
  438. LIST_HEAD(list);
  439. dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
  440. /*
  441. * Submit queued descriptors ASAP, i.e. before we go through
  442. * the completed ones.
  443. */
  444. if (!list_empty(&atchan->queue))
  445. atc_dostart(atchan, atc_first_queued(atchan));
  446. /* empty active_list now that it is completed */
  447. list_splice_init(&atchan->active_list, &list);
  448. /* empty queue list by moving descriptors (if any) to active_list */
  449. list_splice_init(&atchan->queue, &atchan->active_list);
  450. list_for_each_entry_safe(desc, _desc, &list, desc_node)
  451. atc_chain_complete(atchan, desc);
  452. }
  453. /**
  454. * atc_advance_work - at the end of a transaction, move forward
  455. * @atchan: channel where the transaction ended
  456. *
  457. * Called with atchan->lock held and bh disabled
  458. */
  459. static void atc_advance_work(struct at_dma_chan *atchan)
  460. {
  461. dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
  462. if (atc_chan_is_enabled(atchan))
  463. return;
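/*
 * Channel is idle: if at most one descriptor is left on active_list,
 * everything submitted so far has completed; otherwise retire the
 * finished head descriptor and restart the engine on the next one.
 */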
  464. if (list_empty(&atchan->active_list) ||
  465. list_is_singular(&atchan->active_list)) {
  466. atc_complete_all(atchan);
  467. } else {
  468. atc_chain_complete(atchan, atc_first_active(atchan));
  469. /* advance work */
  470. atc_dostart(atchan, atc_first_active(atchan));
  471. }
  472. }
  473. /**
  474. * atc_handle_error - handle errors reported by DMA controller
  475. * @atchan: channel where error occurs
  476. *
  477. * Called with atchan->lock held and bh disabled
  478. */
  479. static void atc_handle_error(struct at_dma_chan *atchan)
  480. {
  481. struct at_desc *bad_desc;
  482. struct at_desc *child;
  483. /*
  484. * The descriptor currently at the head of the active list is
  485. * broken. Since we don't have any way to report errors, we'll
  486. * just have to scream loudly and try to carry on.
  487. */
  488. bad_desc = atc_first_active(atchan);
  489. list_del_init(&bad_desc->desc_node);
  490. /* As we are stopped, take this chance to push queued descriptors
  491. * onto the active_list */
  492. list_splice_init(&atchan->queue, atchan->active_list.prev);
  493. /* Try to restart the controller */
  494. if (!list_empty(&atchan->active_list))
  495. atc_dostart(atchan, atc_first_active(atchan));
  496. /*
  497. * KERN_CRIT may seem harsh, but since this only happens
  498. * when someone submits a bad physical address in a
  499. * descriptor, we should consider ourselves lucky that the
  500. * controller flagged an error instead of scribbling over
  501. * random memory locations.
  502. */
  503. dev_crit(chan2dev(&atchan->chan_common),
  504. "Bad descriptor submitted for DMA!\n");
  505. dev_crit(chan2dev(&atchan->chan_common),
  506. " cookie: %d\n", bad_desc->txd.cookie);
  507. atc_dump_lli(atchan, &bad_desc->lli);
  508. list_for_each_entry(child, &bad_desc->tx_list, desc_node)
  509. atc_dump_lli(atchan, &child->lli);
  510. /* Pretend the descriptor completed successfully */
  511. atc_chain_complete(atchan, bad_desc);
  512. }
  513. /**
  514. * atc_handle_cyclic - at the end of a period, run callback function
  515. * @atchan: channel used for cyclic operations
  516. *
  517. * Called with atchan->lock held and bh disabled
  518. */
  519. static void atc_handle_cyclic(struct at_dma_chan *atchan)
  520. {
  521. struct at_desc *first = atc_first_active(atchan);
  522. struct dma_async_tx_descriptor *txd = &first->txd;
  523. dev_vdbg(chan2dev(&atchan->chan_common),
  524. "new cyclic period llp 0x%08x\n",
  525. channel_readl(atchan, DSCR));
  526. dmaengine_desc_get_callback_invoke(txd, NULL);
  527. }
  528. /*-- IRQ & Tasklet ---------------------------------------------------*/
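/*
 * atc_tasklet - bottom half scheduled by the interrupt handler; dispatches
 * to error handling, the cyclic period callback, or normal completion work.
 */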
  529. static void atc_tasklet(unsigned long data)
  530. {
  531. struct at_dma_chan *atchan = (struct at_dma_chan *)data;
  532. unsigned long flags;
  533. spin_lock_irqsave(&atchan->lock, flags);
  534. if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
  535. atc_handle_error(atchan);
  536. else if (atc_chan_is_cyclic(atchan))
  537. atc_handle_cyclic(atchan);
  538. else
  539. atc_advance_work(atchan);
  540. spin_unlock_irqrestore(&atchan->lock, flags);
  541. }
  542. static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
  543. {
  544. struct at_dma *atdma = (struct at_dma *)dev_id;
  545. struct at_dma_chan *atchan;
  546. int i;
  547. u32 status, pending, imr;
  548. int ret = IRQ_NONE;
  549. do {
  550. imr = dma_readl(atdma, EBCIMR);
  551. status = dma_readl(atdma, EBCISR);
  552. pending = status & imr;
  553. if (!pending)
  554. break;
  555. dev_vdbg(atdma->dma_common.dev,
  556. "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
  557. status, imr, pending);
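/*
 * Scan all channels: both buffer-transfer-complete and error
 * interrupts are handed over to the per-channel tasklet; on an AHB
 * error the channel is disabled here and flagged via ATC_IS_ERROR.
 */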
  558. for (i = 0; i < atdma->dma_common.chancnt; i++) {
  559. atchan = &atdma->chan[i];
  560. if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
  561. if (pending & AT_DMA_ERR(i)) {
  562. /* Disable channel on AHB error */
  563. dma_writel(atdma, CHDR,
  564. AT_DMA_RES(i) | atchan->mask);
  565. /* Give information to tasklet */
  566. set_bit(ATC_IS_ERROR, &atchan->status);
  567. }
  568. tasklet_schedule(&atchan->tasklet);
  569. ret = IRQ_HANDLED;
  570. }
  571. }
  572. } while (pending);
  573. return ret;
  574. }
  575. /*-- DMA Engine API --------------------------------------------------*/
  576. /**
  577. * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
  578. * @tx: descriptor at the head of the transaction chain
  579. *
  580. * Queue chain if DMA engine is working already
  581. *
  582. * Cookie increment and adding to active_list or queue must be atomic
  583. */
  584. static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
  585. {
  586. struct at_desc *desc = txd_to_at_desc(tx);
  587. struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
  588. dma_cookie_t cookie;
  589. unsigned long flags;
  590. spin_lock_irqsave(&atchan->lock, flags);
  591. cookie = dma_cookie_assign(tx);
  592. if (list_empty(&atchan->active_list)) {
  593. dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
  594. desc->txd.cookie);
  595. atc_dostart(atchan, desc);
  596. list_add_tail(&desc->desc_node, &atchan->active_list);
  597. } else {
  598. dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
  599. desc->txd.cookie);
  600. list_add_tail(&desc->desc_node, &atchan->queue);
  601. }
  602. spin_unlock_irqrestore(&atchan->lock, flags);
  603. return cookie;
  604. }
  605. /**
  606. * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
  607. * @chan: the channel to prepare operation on
  608. * @xt: Interleaved transfer template
  609. * @flags: tx descriptor status flags
  610. */
  611. static struct dma_async_tx_descriptor *
  612. atc_prep_dma_interleaved(struct dma_chan *chan,
  613. struct dma_interleaved_template *xt,
  614. unsigned long flags)
  615. {
  616. struct at_dma_chan *atchan = to_at_dma_chan(chan);
  617. struct data_chunk *first = xt->sgl;
  618. struct at_desc *desc = NULL;
  619. size_t xfer_count;
  620. unsigned int dwidth;
  621. u32 ctrla;
  622. u32 ctrlb;
  623. size_t len = 0;
  624. int i;
  625. if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
  626. return NULL;
  627. dev_info(chan2dev(chan),
  628. "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
  629. __func__, &xt->src_start, &xt->dst_start, xt->numf,
  630. xt->frame_size, flags);
  631. /*
  632. * The controller can only "skip" X bytes every Y bytes, so we
  633. * need to make sure we are given a template that fits that
  634. * description, i.e. a template whose chunks always have the
  635. * same size, with the same ICGs.
  636. */
  637. for (i = 0; i < xt->frame_size; i++) {
  638. struct data_chunk *chunk = xt->sgl + i;
  639. if ((chunk->size != xt->sgl->size) ||
  640. (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
  641. (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
  642. dev_err(chan2dev(chan),
  643. "%s: the controller can transfer only identical chunks\n",
  644. __func__);
  645. return NULL;
  646. }
  647. len += chunk->size;
  648. }
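/*
 * Use the widest transfer width allowed by the address and length
 * alignment; the boundary and hole values programmed below are
 * expressed in units of that width.
 */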
  649. dwidth = atc_get_xfer_width(xt->src_start,
  650. xt->dst_start, len);
  651. xfer_count = len >> dwidth;
  652. if (xfer_count > ATC_BTSIZE_MAX) {
  653. dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
  654. return NULL;
  655. }
  656. ctrla = ATC_SRC_WIDTH(dwidth) |
  657. ATC_DST_WIDTH(dwidth);
  658. ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
  659. | ATC_SRC_ADDR_MODE_INCR
  660. | ATC_DST_ADDR_MODE_INCR
  661. | ATC_SRC_PIP
  662. | ATC_DST_PIP
  663. | ATC_FC_MEM2MEM;
  664. /* create the transfer */
  665. desc = atc_desc_get(atchan);
  666. if (!desc) {
  667. dev_err(chan2dev(chan),
  668. "%s: couldn't allocate our descriptor\n", __func__);
  669. return NULL;
  670. }
  671. desc->lli.saddr = xt->src_start;
  672. desc->lli.daddr = xt->dst_start;
  673. desc->lli.ctrla = ctrla | xfer_count;
  674. desc->lli.ctrlb = ctrlb;
  675. desc->boundary = first->size >> dwidth;
  676. desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
  677. desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;
  678. desc->txd.cookie = -EBUSY;
  679. desc->total_len = desc->len = len;
  680. /* set end-of-link to the last link descriptor of the list */
  681. set_desc_eol(desc);
  682. desc->txd.flags = flags; /* client is in control of this ack */
  683. return &desc->txd;
  684. }
  685. /**
  686. * atc_prep_dma_memcpy - prepare a memcpy operation
  687. * @chan: the channel to prepare operation on
  688. * @dest: operation virtual destination address
  689. * @src: operation virtual source address
  690. * @len: operation length
  691. * @flags: tx descriptor status flags
  692. */
  693. static struct dma_async_tx_descriptor *
  694. atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
  695. size_t len, unsigned long flags)
  696. {
  697. struct at_dma_chan *atchan = to_at_dma_chan(chan);
  698. struct at_desc *desc = NULL;
  699. struct at_desc *first = NULL;
  700. struct at_desc *prev = NULL;
  701. size_t xfer_count;
  702. size_t offset;
  703. unsigned int src_width;
  704. unsigned int dst_width;
  705. u32 ctrla;
  706. u32 ctrlb;
  707. dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
  708. &dest, &src, len, flags);
  709. if (unlikely(!len)) {
  710. dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
  711. return NULL;
  712. }
  713. ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
  714. | ATC_SRC_ADDR_MODE_INCR
  715. | ATC_DST_ADDR_MODE_INCR
  716. | ATC_FC_MEM2MEM;
  717. /*
  718. * We can be a lot more clever here, but this should take care
  719. * of the most common optimization.
  720. */
  721. src_width = dst_width = atc_get_xfer_width(src, dest, len);
  722. ctrla = ATC_SRC_WIDTH(src_width) |
  723. ATC_DST_WIDTH(dst_width);
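/*
 * Split the copy into linked-list items of at most ATC_BTSIZE_MAX
 * transfer units each and chain them together.
 */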
  724. for (offset = 0; offset < len; offset += xfer_count << src_width) {
  725. xfer_count = min_t(size_t, (len - offset) >> src_width,
  726. ATC_BTSIZE_MAX);
  727. desc = atc_desc_get(atchan);
  728. if (!desc)
  729. goto err_desc_get;
  730. desc->lli.saddr = src + offset;
  731. desc->lli.daddr = dest + offset;
  732. desc->lli.ctrla = ctrla | xfer_count;
  733. desc->lli.ctrlb = ctrlb;
  734. desc->txd.cookie = 0;
  735. desc->len = xfer_count << src_width;
  736. atc_desc_chain(&first, &prev, desc);
  737. }
  738. /* First descriptor of the chain embeds additional information */
  739. first->txd.cookie = -EBUSY;
  740. first->total_len = len;
  741. /* set end-of-link to the last link descriptor of the list */
  742. set_desc_eol(desc);
  743. first->txd.flags = flags; /* client is in control of this ack */
  744. return &first->txd;
  745. err_desc_get:
  746. atc_desc_put(atchan, first);
  747. return NULL;
  748. }
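/*
 * atc_create_memset_desc - build one descriptor for a memset operation:
 * the 32-bit fill value sits in a small DMA pool buffer that is read with
 * a fixed source address while the destination address increments.
 */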
  749. static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
  750. dma_addr_t psrc,
  751. dma_addr_t pdst,
  752. size_t len)
  753. {
  754. struct at_dma_chan *atchan = to_at_dma_chan(chan);
  755. struct at_desc *desc;
  756. size_t xfer_count;
  757. u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
  758. u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
  759. ATC_SRC_ADDR_MODE_FIXED |
  760. ATC_DST_ADDR_MODE_INCR |
  761. ATC_FC_MEM2MEM;
  762. xfer_count = len >> 2;
  763. if (xfer_count > ATC_BTSIZE_MAX) {
  764. dev_err(chan2dev(chan), "%s: buffer is too big\n",
  765. __func__);
  766. return NULL;
  767. }
  768. desc = atc_desc_get(atchan);
  769. if (!desc) {
  770. dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
  771. __func__);
  772. return NULL;
  773. }
  774. desc->lli.saddr = psrc;
  775. desc->lli.daddr = pdst;
  776. desc->lli.ctrla = ctrla | xfer_count;
  777. desc->lli.ctrlb = ctrlb;
  778. desc->txd.cookie = 0;
  779. desc->len = len;
  780. return desc;
  781. }
  782. /**
  783. * atc_prep_dma_memset - prepare a memset operation
  784. * @chan: the channel to prepare operation on
  785. * @dest: operation virtual destination address
  786. * @value: value to set memory buffer to
  787. * @len: operation length
  788. * @flags: tx descriptor status flags
  789. */
  790. static struct dma_async_tx_descriptor *
  791. atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
  792. size_t len, unsigned long flags)
  793. {
  794. struct at_dma *atdma = to_at_dma(chan->device);
  795. struct at_desc *desc;
  796. void __iomem *vaddr;
  797. dma_addr_t paddr;
  798. dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
  799. &dest, value, len, flags);
  800. if (unlikely(!len)) {
  801. dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
  802. return NULL;
  803. }
  804. if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
  805. dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
  806. __func__);
  807. return NULL;
  808. }
  809. vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
  810. if (!vaddr) {
  811. dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
  812. __func__);
  813. return NULL;
  814. }
  815. *(u32*)vaddr = value;
  816. desc = atc_create_memset_desc(chan, paddr, dest, len);
  817. if (!desc) {
  818. dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
  819. __func__);
  820. goto err_free_buffer;
  821. }
  822. desc->memset_paddr = paddr;
  823. desc->memset_vaddr = vaddr;
  824. desc->memset_buffer = true;
  825. desc->txd.cookie = -EBUSY;
  826. desc->total_len = len;
  827. /* set end-of-link on the descriptor */
  828. set_desc_eol(desc);
  829. desc->txd.flags = flags;
  830. return &desc->txd;
  831. err_free_buffer:
  832. dma_pool_free(atdma->memset_pool, vaddr, paddr);
  833. return NULL;
  834. }
  835. static struct dma_async_tx_descriptor *
  836. atc_prep_dma_memset_sg(struct dma_chan *chan,
  837. struct scatterlist *sgl,
  838. unsigned int sg_len, int value,
  839. unsigned long flags)
  840. {
  841. struct at_dma_chan *atchan = to_at_dma_chan(chan);
  842. struct at_dma *atdma = to_at_dma(chan->device);
  843. struct at_desc *desc = NULL, *first = NULL, *prev = NULL;
  844. struct scatterlist *sg;
  845. void __iomem *vaddr;
  846. dma_addr_t paddr;
  847. size_t total_len = 0;
  848. int i;
  849. dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__,
  850. value, sg_len, flags);
  851. if (unlikely(!sgl || !sg_len)) {
  852. dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
  853. __func__);
  854. return NULL;
  855. }
  856. vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
  857. if (!vaddr) {
  858. dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
  859. __func__);
  860. return NULL;
  861. }
  862. *(u32*)vaddr = value;
  863. for_each_sg(sgl, sg, sg_len, i) {
  864. dma_addr_t dest = sg_dma_address(sg);
  865. size_t len = sg_dma_len(sg);
  866. dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
  867. __func__, &dest, len);
  868. if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
  869. dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
  870. __func__);
  871. goto err_put_desc;
  872. }
  873. desc = atc_create_memset_desc(chan, paddr, dest, len);
  874. if (!desc)
  875. goto err_put_desc;
  876. atc_desc_chain(&first, &prev, desc);
  877. total_len += len;
  878. }
  879. /*
  880. * Only set the buffer pointers on the last descriptor to
  881. * avoid freeing it while the transfer is still ongoing
  882. */
  883. desc->memset_paddr = paddr;
  884. desc->memset_vaddr = vaddr;
  885. desc->memset_buffer = true;
  886. first->txd.cookie = -EBUSY;
  887. first->total_len = total_len;
  888. /* set end-of-link on the descriptor */
  889. set_desc_eol(desc);
  890. first->txd.flags = flags;
  891. return &first->txd;
  892. err_put_desc:
  893. atc_desc_put(atchan, first);
  894. return NULL;
  895. }
  896. /**
  897. * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
  898. * @chan: DMA channel
  899. * @sgl: scatterlist to transfer to/from
  900. * @sg_len: number of entries in @sgl
  901. * @direction: DMA direction
  902. * @flags: tx descriptor status flags
  903. * @context: transaction context (ignored)
  904. */
  905. static struct dma_async_tx_descriptor *
  906. atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
  907. unsigned int sg_len, enum dma_transfer_direction direction,
  908. unsigned long flags, void *context)
  909. {
  910. struct at_dma_chan *atchan = to_at_dma_chan(chan);
  911. struct at_dma_slave *atslave = chan->private;
  912. struct dma_slave_config *sconfig = &atchan->dma_sconfig;
  913. struct at_desc *first = NULL;
  914. struct at_desc *prev = NULL;
  915. u32 ctrla;
  916. u32 ctrlb;
  917. dma_addr_t reg;
  918. unsigned int reg_width;
  919. unsigned int mem_width;
  920. unsigned int i;
  921. struct scatterlist *sg;
  922. size_t total_len = 0;
  923. dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
  924. sg_len,
  925. direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
  926. flags);
  927. if (unlikely(!atslave || !sg_len)) {
  928. dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
  929. return NULL;
  930. }
  931. ctrla = ATC_SCSIZE(sconfig->src_maxburst)
  932. | ATC_DCSIZE(sconfig->dst_maxburst);
  933. ctrlb = ATC_IEN;
  934. switch (direction) {
  935. case DMA_MEM_TO_DEV:
  936. reg_width = convert_buswidth(sconfig->dst_addr_width);
  937. ctrla |= ATC_DST_WIDTH(reg_width);
  938. ctrlb |= ATC_DST_ADDR_MODE_FIXED
  939. | ATC_SRC_ADDR_MODE_INCR
  940. | ATC_FC_MEM2PER
  941. | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
  942. reg = sconfig->dst_addr;
  943. for_each_sg(sgl, sg, sg_len, i) {
  944. struct at_desc *desc;
  945. u32 len;
  946. u32 mem;
  947. desc = atc_desc_get(atchan);
  948. if (!desc)
  949. goto err_desc_get;
  950. mem = sg_dma_address(sg);
  951. len = sg_dma_len(sg);
  952. if (unlikely(!len)) {
  953. dev_dbg(chan2dev(chan),
  954. "prep_slave_sg: sg(%d) data length is zero\n", i);
  955. goto err;
  956. }
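/* Use 32-bit memory accesses when the address and length allow it,
 * otherwise fall back to byte accesses. */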
  957. mem_width = 2;
  958. if (unlikely(mem & 3 || len & 3))
  959. mem_width = 0;
  960. desc->lli.saddr = mem;
  961. desc->lli.daddr = reg;
  962. desc->lli.ctrla = ctrla
  963. | ATC_SRC_WIDTH(mem_width)
  964. | len >> mem_width;
  965. desc->lli.ctrlb = ctrlb;
  966. desc->len = len;
  967. atc_desc_chain(&first, &prev, desc);
  968. total_len += len;
  969. }
  970. break;
  971. case DMA_DEV_TO_MEM:
  972. reg_width = convert_buswidth(sconfig->src_addr_width);
  973. ctrla |= ATC_SRC_WIDTH(reg_width);
  974. ctrlb |= ATC_DST_ADDR_MODE_INCR
  975. | ATC_SRC_ADDR_MODE_FIXED
  976. | ATC_FC_PER2MEM
  977. | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);
  978. reg = sconfig->src_addr;
  979. for_each_sg(sgl, sg, sg_len, i) {
  980. struct at_desc *desc;
  981. u32 len;
  982. u32 mem;
  983. desc = atc_desc_get(atchan);
  984. if (!desc)
  985. goto err_desc_get;
  986. mem = sg_dma_address(sg);
  987. len = sg_dma_len(sg);
  988. if (unlikely(!len)) {
  989. dev_dbg(chan2dev(chan),
  990. "prep_slave_sg: sg(%d) data length is zero\n", i);
  991. goto err;
  992. }
  993. mem_width = 2;
  994. if (unlikely(mem & 3 || len & 3))
  995. mem_width = 0;
  996. desc->lli.saddr = reg;
  997. desc->lli.daddr = mem;
  998. desc->lli.ctrla = ctrla
  999. | ATC_DST_WIDTH(mem_width)
  1000. | len >> reg_width;
  1001. desc->lli.ctrlb = ctrlb;
  1002. desc->len = len;
  1003. atc_desc_chain(&first, &prev, desc);
  1004. total_len += len;
  1005. }
  1006. break;
  1007. default:
  1008. return NULL;
  1009. }
  1010. /* set end-of-link to the last link descriptor of the list */
  1011. set_desc_eol(prev);
  1012. /* First descriptor of the chain embeds additional information */
  1013. first->txd.cookie = -EBUSY;
  1014. first->total_len = total_len;
  1015. /* first link descriptor of the list is responsible for the flags */
  1016. first->txd.flags = flags; /* client is in control of this ack */
  1017. return &first->txd;
  1018. err_desc_get:
  1019. dev_err(chan2dev(chan), "not enough descriptors available\n");
  1020. err:
  1021. atc_desc_put(atchan, first);
  1022. return NULL;
  1023. }
  1024. /**
  1025. * atc_prep_dma_sg - prepare memory to memory scatter-gather operation
  1026. * @chan: the channel to prepare operation on
  1027. * @dst_sg: destination scatterlist
  1028. * @dst_nents: number of destination scatterlist entries
  1029. * @src_sg: source scatterlist
  1030. * @src_nents: number of source scatterlist entries
  1031. * @flags: tx descriptor status flags
  1032. */
  1033. static struct dma_async_tx_descriptor *
  1034. atc_prep_dma_sg(struct dma_chan *chan,
  1035. struct scatterlist *dst_sg, unsigned int dst_nents,
  1036. struct scatterlist *src_sg, unsigned int src_nents,
  1037. unsigned long flags)
  1038. {
  1039. struct at_dma_chan *atchan = to_at_dma_chan(chan);
  1040. struct at_desc *desc = NULL;
  1041. struct at_desc *first = NULL;
  1042. struct at_desc *prev = NULL;
  1043. unsigned int src_width;
  1044. unsigned int dst_width;
  1045. size_t xfer_count;
  1046. u32 ctrla;
  1047. u32 ctrlb;
  1048. size_t dst_len = 0, src_len = 0;
  1049. dma_addr_t dst = 0, src = 0;
  1050. size_t len = 0, total_len = 0;
  1051. if (unlikely(dst_nents == 0 || src_nents == 0))
  1052. return NULL;
  1053. if (unlikely(dst_sg == NULL || src_sg == NULL))
  1054. return NULL;
  1055. ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
  1056. | ATC_SRC_ADDR_MODE_INCR
  1057. | ATC_DST_ADDR_MODE_INCR
  1058. | ATC_FC_MEM2MEM;
  1059. /*
  1060. * loop until there is either no more source or no more destination
  1061. * scatterlist entry
  1062. */
  1063. while (true) {
  1064. /* prepare the next transfer */
  1065. if (dst_len == 0) {
  1066. /* no more destination scatterlist entries */
  1067. if (!dst_sg || !dst_nents)
  1068. break;
  1069. dst = sg_dma_address(dst_sg);
  1070. dst_len = sg_dma_len(dst_sg);
  1071. dst_sg = sg_next(dst_sg);
  1072. dst_nents--;
  1073. }
  1074. if (src_len == 0) {
  1075. /* no more source scatterlist entries */
  1076. if (!src_sg || !src_nents)
  1077. break;
  1078. src = sg_dma_address(src_sg);
  1079. src_len = sg_dma_len(src_sg);
  1080. src_sg = sg_next(src_sg);
  1081. src_nents--;
  1082. }
  1083. len = min_t(size_t, src_len, dst_len);
  1084. if (len == 0)
  1085. continue;
  1086. /* take care for the alignment */
  1087. src_width = dst_width = atc_get_xfer_width(src, dst, len);
  1088. ctrla = ATC_SRC_WIDTH(src_width) |
  1089. ATC_DST_WIDTH(dst_width);
  1090. /*
  1091. * The number of transfers to set up refers to the source width,
  1092. * which depends on the alignment.
  1093. */
  1094. xfer_count = len >> src_width;
  1095. if (xfer_count > ATC_BTSIZE_MAX) {
  1096. xfer_count = ATC_BTSIZE_MAX;
  1097. len = ATC_BTSIZE_MAX << src_width;
  1098. }
  1099. /* create the transfer */
  1100. desc = atc_desc_get(atchan);
  1101. if (!desc)
  1102. goto err_desc_get;
  1103. desc->lli.saddr = src;
  1104. desc->lli.daddr = dst;
  1105. desc->lli.ctrla = ctrla | xfer_count;
  1106. desc->lli.ctrlb = ctrlb;
  1107. desc->txd.cookie = 0;
  1108. desc->len = len;
  1109. atc_desc_chain(&first, &prev, desc);
  1110. /* update the lengths and addresses for the next loop cycle */
  1111. dst_len -= len;
  1112. src_len -= len;
  1113. dst += len;
  1114. src += len;
  1115. total_len += len;
  1116. }
  1117. /* First descriptor of the chain embeds additional information */
  1118. first->txd.cookie = -EBUSY;
  1119. first->total_len = total_len;
  1120. /* set end-of-link to the last link descriptor of the list */
  1121. set_desc_eol(desc);
  1122. first->txd.flags = flags; /* client is in control of this ack */
  1123. return &first->txd;
  1124. err_desc_get:
  1125. atc_desc_put(atchan, first);
  1126. return NULL;
  1127. }
  1128. /**
  1129. * atc_dma_cyclic_check_values
  1130. * Check for too big/unaligned periods and unaligned DMA buffer
  1131. */
  1132. static int
  1133. atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
  1134. size_t period_len)
  1135. {
  1136. if (period_len > (ATC_BTSIZE_MAX << reg_width))
  1137. goto err_out;
  1138. if (unlikely(period_len & ((1 << reg_width) - 1)))
  1139. goto err_out;
  1140. if (unlikely(buf_addr & ((1 << reg_width) - 1)))
  1141. goto err_out;
  1142. return 0;
  1143. err_out:
  1144. return -EINVAL;
  1145. }
  1146. /**
  1147. * atc_dma_cyclic_fill_desc - Fill one period descriptor
  1148. */
  1149. static int
  1150. atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
  1151. unsigned int period_index, dma_addr_t buf_addr,
  1152. unsigned int reg_width, size_t period_len,
  1153. enum dma_transfer_direction direction)
  1154. {
  1155. struct at_dma_chan *atchan = to_at_dma_chan(chan);
  1156. struct dma_slave_config *sconfig = &atchan->dma_sconfig;
  1157. u32 ctrla;
  1158. /* prepare common CTRLA value */
  1159. ctrla = ATC_SCSIZE(sconfig->src_maxburst)
  1160. | ATC_DCSIZE(sconfig->dst_maxburst)
  1161. | ATC_DST_WIDTH(reg_width)
  1162. | ATC_SRC_WIDTH(reg_width)
  1163. | period_len >> reg_width;
  1164. switch (direction) {
  1165. case DMA_MEM_TO_DEV:
  1166. desc->lli.saddr = buf_addr + (period_len * period_index);
  1167. desc->lli.daddr = sconfig->dst_addr;
  1168. desc->lli.ctrla = ctrla;
  1169. desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
  1170. | ATC_SRC_ADDR_MODE_INCR
  1171. | ATC_FC_MEM2PER
  1172. | ATC_SIF(atchan->mem_if)
  1173. | ATC_DIF(atchan->per_if);
  1174. desc->len = period_len;
  1175. break;
  1176. case DMA_DEV_TO_MEM:
  1177. desc->lli.saddr = sconfig->src_addr;
  1178. desc->lli.daddr = buf_addr + (period_len * period_index);
  1179. desc->lli.ctrla = ctrla;
  1180. desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
  1181. | ATC_SRC_ADDR_MODE_FIXED
  1182. | ATC_FC_PER2MEM
  1183. | ATC_SIF(atchan->per_if)
  1184. | ATC_DIF(atchan->mem_if);
  1185. desc->len = period_len;
  1186. break;
  1187. default:
  1188. return -EINVAL;
  1189. }
  1190. return 0;
  1191. }
  1192. /**
  1193. * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
  1194. * @chan: the DMA channel to prepare
  1195. * @buf_addr: physical DMA address where the buffer starts
  1196. * @buf_len: total number of bytes for the entire buffer
  1197. * @period_len: number of bytes for each period
  1198. * @direction: transfer direction, to or from device
  1199. * @flags: tx descriptor status flags
  1200. */
  1201. static struct dma_async_tx_descriptor *
  1202. atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
  1203. size_t period_len, enum dma_transfer_direction direction,
  1204. unsigned long flags)
  1205. {
  1206. struct at_dma_chan *atchan = to_at_dma_chan(chan);
  1207. struct at_dma_slave *atslave = chan->private;
  1208. struct dma_slave_config *sconfig = &atchan->dma_sconfig;
  1209. struct at_desc *first = NULL;
  1210. struct at_desc *prev = NULL;
  1211. unsigned long was_cyclic;
  1212. unsigned int reg_width;
  1213. unsigned int periods = buf_len / period_len;
  1214. unsigned int i;
  1215. dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
  1216. direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
  1217. &buf_addr,
  1218. periods, buf_len, period_len);
  1219. if (unlikely(!atslave || !buf_len || !period_len)) {
  1220. dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
  1221. return NULL;
  1222. }
  1223. was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
  1224. if (was_cyclic) {
  1225. dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
  1226. return NULL;
  1227. }
  1228. if (unlikely(!is_slave_direction(direction)))
  1229. goto err_out;
  1230. if (sconfig->direction == DMA_MEM_TO_DEV)
  1231. reg_width = convert_buswidth(sconfig->dst_addr_width);
  1232. else
  1233. reg_width = convert_buswidth(sconfig->src_addr_width);
  1234. /* Check for too big/unaligned periods and unaligned DMA buffer */
  1235. if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
  1236. goto err_out;
  1237. /* build cyclic linked list */
  1238. for (i = 0; i < periods; i++) {
  1239. struct at_desc *desc;
  1240. desc = atc_desc_get(atchan);
  1241. if (!desc)
  1242. goto err_desc_get;
  1243. if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
  1244. reg_width, period_len, direction))
  1245. goto err_desc_get;
  1246. atc_desc_chain(&first, &prev, desc);
  1247. }
  1248. /* let's make a cyclic list */
  1249. prev->lli.dscr = first->txd.phys;
  1250. /* First descriptor of the chain embeds additional information */
  1251. first->txd.cookie = -EBUSY;
  1252. first->total_len = buf_len;
  1253. return &first->txd;
  1254. err_desc_get:
  1255. dev_err(chan2dev(chan), "not enough descriptors available\n");
  1256. atc_desc_put(atchan, first);
  1257. err_out:
  1258. clear_bit(ATC_IS_CYCLIC, &atchan->status);
  1259. return NULL;
  1260. }
  1261. static int atc_config(struct dma_chan *chan,
  1262. struct dma_slave_config *sconfig)
  1263. {
  1264. struct at_dma_chan *atchan = to_at_dma_chan(chan);
  1265. dev_vdbg(chan2dev(chan), "%s\n", __func__);
  1266. /* Check if the channel is configured for slave transfers */
  1267. if (!chan->private)
  1268. return -EINVAL;
  1269. memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
  1270. convert_burst(&atchan->dma_sconfig.src_maxburst);
  1271. convert_burst(&atchan->dma_sconfig.dst_maxburst);
  1272. return 0;
  1273. }
  1274. static int atc_pause(struct dma_chan *chan)
  1275. {
  1276. struct at_dma_chan *atchan = to_at_dma_chan(chan);
  1277. struct at_dma *atdma = to_at_dma(chan->device);
  1278. int chan_id = atchan->chan_common.chan_id;
  1279. unsigned long flags;
  1280. LIST_HEAD(list);
  1281. dev_vdbg(chan2dev(chan), "%s\n", __func__);
  1282. spin_lock_irqsave(&atchan->lock, flags);
  1283. dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
  1284. set_bit(ATC_IS_PAUSED, &atchan->status);
  1285. spin_unlock_irqrestore(&atchan->lock, flags);
  1286. return 0;
  1287. }

static int atc_resume(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (!atc_chan_is_paused(atchan))
		return 0;

	spin_lock_irqsave(&atchan->lock, flags);

	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
	clear_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int atc_terminate_all(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	struct at_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_irqsave(&atchan->lock, flags);

	/* disabling channel: must also remove suspend state */
	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

	/* confirm that this channel is disabled */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&atchan->queue, &list);
	list_splice_init(&atchan->active_list, &list);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);

	clear_bit(ATC_IS_PAUSED, &atchan->status);
	/* if channel dedicated to cyclic operations, free it */
	clear_bit(ATC_IS_CYCLIC, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
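
/*
 * Example (illustrative sketch, not part of this driver): clients reach the
 * three operations above through the generic dmaengine wrappers, e.g. when
 * stopping a peripheral that runs a cyclic transfer:
 *
 *	dmaengine_pause(chan);		// -> atc_pause()
 *	...
 *	dmaengine_resume(chan);		// -> atc_resume()
 *	...
 *	dmaengine_terminate_all(chan);	// -> atc_terminate_all()
 *
 * After termination both ATC_IS_PAUSED and ATC_IS_CYCLIC are cleared, so
 * the channel can be reused for a new transfer.
 */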

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL, updated with the transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;
	enum dma_status		ret;
	int bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;
	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate)
		return DMA_ERROR;

	spin_lock_irqsave(&atchan->lock, flags);

	/* Get number of bytes left in the active transactions */
	bytes = atc_get_bytes_left(chan, cookie);

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (unlikely(bytes < 0)) {
		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
		return DMA_ERROR;
	} else {
		dma_set_residue(txstate, bytes);
	}

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
		 ret, cookie, bytes);

	return ret;
}
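
/*
 * Example (illustrative sketch, not part of this driver): polling a cookie
 * with the residue reported by atc_tx_status(), as described in the
 * kernel-doc above. "my_cookie" stands for a cookie previously returned by
 * dmaengine_submit() and complete_my_request() is a hypothetical helper.
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, my_cookie, &state);
 *	if (status == DMA_COMPLETE)
 *		complete_my_request();
 *	else
 *		pr_debug("residue: %u bytes\n", state.residue);
 *
 * dma_async_is_complete(my_cookie, state.last, state.used) can then be used
 * to check further cookies without touching the hardware again.
 */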

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}
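
/*
 * Example (illustrative sketch, not part of this driver): the usual
 * dmaengine sequence that ends up in atc_issue_pending(). "desc" stands for
 * a descriptor returned by one of the prep routines of this driver.
 *
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);	// ends up in atc_advance_work()
 *
 * As the test at the top of atc_issue_pending() shows, cyclic channels
 * simply return here because issue_pending is not needed for them.
 */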

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * Return: the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if a cfg value was specified, take it instead of the default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/*
	 * If the channel has already been set up, just reconfigure it;
	 * there is no need to reallocate descriptors.
	 */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

#ifdef CONFIG_OF
static bool at_dma_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *atslave = slave;

	if (atslave->dma_dev == chan->device->dev) {
		chan->private = atslave;
		return true;
	} else {
		return false;
	}
}
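
/*
 * Example (illustrative sketch, not part of this driver): at_dma_filter()
 * is meant to be handed to dma_request_channel() together with an
 * at_dma_slave structure, which is exactly what at_dma_xlate() below does.
 * A platform-data style client could do the same by hand; the cfg value is
 * only a placeholder and "dmac_pdev" names the controller's platform
 * device.
 *
 *	struct at_dma_slave *atslave;
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
 *	atslave->dma_dev = &dmac_pdev->dev;	// must match the controller
 *	atslave->cfg = ATC_FIFOCFG_HALFFIFO;	// placeholder configuration
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, at_dma_filter, atslave);
 *
 * The filter stores the pointer in chan->private, so atslave must stay
 * valid for as long as the channel is in use.
 */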

static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	struct dma_chan *chan;
	struct at_dma_chan *atchan;
	struct at_dma_slave *atslave;
	dma_cap_mask_t mask;
	unsigned int per_id;
	struct platform_device *dmac_pdev;

	if (dma_spec->args_count != 2)
		return NULL;

	dmac_pdev = of_find_device_by_node(dma_spec->np);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
	if (!atslave)
		return NULL;

	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
	/*
	 * We can fill both SRC_PER and DST_PER, one of these fields will be
	 * ignored depending on DMA transfer direction.
	 */
	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
	atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
		     | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
	/*
	 * We have to translate the value we get from the device tree since
	 * the half FIFO configuration value had to be 0 to keep backward
	 * compatibility.
	 */
	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
	case AT91_DMA_CFG_FIFOCFG_ALAP:
		atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
		break;
	case AT91_DMA_CFG_FIFOCFG_ASAP:
		atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
		break;
	case AT91_DMA_CFG_FIFOCFG_HALF:
	default:
		atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
	}
	atslave->dma_dev = &dmac_pdev->dev;

	chan = dma_request_channel(mask, at_dma_filter, atslave);
	if (!chan)
		return NULL;

	atchan = to_at_dma_chan(chan);
	atchan->per_if = dma_spec->args[0] & 0xff;
	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;

	return chan;
}
#else
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	return NULL;
}
#endif
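
/*
 * Example (illustrative sketch, derived only from the decoding in
 * at_dma_xlate() above): a client node references this controller with a
 * two-cell specifier. Cell 0 carries the peripheral interface in its low
 * byte and the memory interface in bits 23:16; cell 1 carries the
 * peripheral ID (AT91_DMA_CFG_PER_ID_MASK) and the FIFO configuration
 * (AT91_DMA_CFG_FIFOCFG_MASK). All numbers below are placeholders; see the
 * atmel-dma device-tree binding for the authoritative format.
 *
 *	dmas = <&dma0 (0 << 16 | 2) (AT91_DMA_CFG_FIFOCFG_HALF | 10)>;
 *	dma-names = "tx";
 */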

/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	err = clk_prepare_enable(atdma->clk);
	if (err)
		goto err_clk_prepare;

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_desc_pool_create;
	}

	/* create a pool of consistent memory blocks for memset blocks */
	atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
			&pdev->dev, sizeof(int), 4, 0);
	if (!atdma->memset_pool) {
		dev_err(&pdev->dev, "No memory for memset dma pool\n");
		err = -ENOMEM;
		goto err_memset_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->mem_if = AT_DMA_MEM_IF;
		atchan->per_if = AT_DMA_PER_IF;
		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;

	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
		atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
		atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
	}

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_config = atc_config;
		atdma->dma_common.device_pause = atc_pause;
		atdma->dma_common.device_resume = atc_resume;
		atdma->dma_common.device_terminate_all = atc_terminate_all;
		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	}

	if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
	  dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	/*
	 * Do not return an error if the dmac node is not present in order to
	 * not break the existing way of requesting channel with
	 * dma_request_channel().
	 */
	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 at_dma_xlate, atdma);
		if (err) {
			dev_err(&pdev->dev, "could not register of_dma_controller\n");
			goto err_of_dma_controller_register;
		}
	}

	return 0;

err_of_dma_controller_register:
	dma_async_device_unregister(&atdma->dma_common);
	dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
	dma_pool_destroy(atdma->dma_desc_pool);
err_desc_pool_create:
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable_unprepare(atdma->clk);
err_clk_prepare:
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->memset_pool);
	dma_pool_destroy(atdma->dma_desc_pool);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable_unprepare(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/*
	 * The channel should already have been paused by its user;
	 * pause it here anyway if that has not been done.
	 */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_pause(chan);
	}

	/*
	 * Now preserve additional data for cyclic operations:
	 * the next descriptor address in the cyclic list.
	 */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable_unprepare(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/*
	 * Restore channel status for the cyclic descriptor list:
	 * restart from the descriptor that was next at suspend time.
	 */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/*
	 * The channel pause status should be removed by the channel user;
	 * we cannot take the initiative to do it here.
	 */
	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_prepare_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= at_dma_remove,
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");