at_xdmac.c
/*
 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
 *
 * Copyright (C) 2014 Atmel Corporation
 *
 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/barrier.h>
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#include "dmaengine.h"

/* Global registers */
#define AT_XDMAC_GTYPE 0x00 /* Global Type Register */
#define AT_XDMAC_NB_CH(i) (((i) & 0x1F) + 1) /* Number of Channels Minus One */
#define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */
#define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests Minus One */
#define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */
#define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */
#define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */
#define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */
#define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
#define AT_XDMAC_GIS 0x18 /* Global Interrupt Status Register */
#define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
#define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
#define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
#define AT_XDMAC_GRS 0x28 /* Global Channel Read Suspend Register */
#define AT_XDMAC_GWS 0x2C /* Global Write Suspend Register */
#define AT_XDMAC_GRWS 0x30 /* Global Channel Read Write Suspend Register */
#define AT_XDMAC_GRWR 0x34 /* Global Channel Read Write Resume Register */
#define AT_XDMAC_GSWR 0x38 /* Global Channel Software Request Register */
#define AT_XDMAC_GSWS 0x3C /* Global Channel Software Request Status Register */
#define AT_XDMAC_GSWF 0x40 /* Global Channel Software Flush Request Register */
#define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */

/* Channel relative registers offsets */
#define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */
#define AT_XDMAC_CIE_BIE BIT(0) /* End of Block Interrupt Enable Bit */
#define AT_XDMAC_CIE_LIE BIT(1) /* End of Linked List Interrupt Enable Bit */
#define AT_XDMAC_CIE_DIE BIT(2) /* End of Disable Interrupt Enable Bit */
#define AT_XDMAC_CIE_FIE BIT(3) /* End of Flush Interrupt Enable Bit */
#define AT_XDMAC_CIE_RBEIE BIT(4) /* Read Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_WBEIE BIT(5) /* Write Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_ROIE BIT(6) /* Request Overflow Interrupt Enable Bit */
#define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */
#define AT_XDMAC_CID_BID BIT(0) /* End of Block Interrupt Disable Bit */
#define AT_XDMAC_CID_LID BIT(1) /* End of Linked List Interrupt Disable Bit */
#define AT_XDMAC_CID_DID BIT(2) /* End of Disable Interrupt Disable Bit */
#define AT_XDMAC_CID_FID BIT(3) /* End of Flush Interrupt Disable Bit */
#define AT_XDMAC_CID_RBEID BIT(4) /* Read Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_WBEID BIT(5) /* Write Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_ROID BIT(6) /* Request Overflow Interrupt Disable Bit */
#define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */
#define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */
#define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */
#define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */
#define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */
#define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */
#define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */
#define AT_XDMAC_CIS_BIS BIT(0) /* End of Block Interrupt Status Bit */
#define AT_XDMAC_CIS_LIS BIT(1) /* End of Linked List Interrupt Status Bit */
#define AT_XDMAC_CIS_DIS BIT(2) /* End of Disable Interrupt Status Bit */
#define AT_XDMAC_CIS_FIS BIT(3) /* End of Flush Interrupt Status Bit */
#define AT_XDMAC_CIS_RBEIS BIT(4) /* Read Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_WBEIS BIT(5) /* Write Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_ROIS BIT(6) /* Request Overflow Interrupt Status Bit */
#define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */
#define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */
#define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */
#define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */
#define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */
#define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */
#define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
#define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
#define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
#define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
#define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
#define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
#define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */
#define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */
#define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */
#define AT_XDMAC_CC 0x28 /* Channel Configuration Register */
#define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */
#define AT_XDMAC_CC_TYPE_MEM_TRAN (0x0 << 0) /* Memory to Memory Transfer */
#define AT_XDMAC_CC_TYPE_PER_TRAN (0x1 << 0) /* Peripheral to Memory or Memory to Peripheral Transfer */
#define AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1)
#define AT_XDMAC_CC_MBSIZE_SINGLE (0x0 << 1)
#define AT_XDMAC_CC_MBSIZE_FOUR (0x1 << 1)
#define AT_XDMAC_CC_MBSIZE_EIGHT (0x2 << 1)
#define AT_XDMAC_CC_MBSIZE_SIXTEEN (0x3 << 1)
#define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */
#define AT_XDMAC_CC_DSYNC_PER2MEM (0x0 << 4)
#define AT_XDMAC_CC_DSYNC_MEM2PER (0x1 << 4)
#define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */
#define AT_XDMAC_CC_PROT_SEC (0x0 << 5)
#define AT_XDMAC_CC_PROT_UNSEC (0x1 << 5)
#define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */
#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6)
#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6)
#define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */
#define AT_XDMAC_CC_MEMSET_NORMAL_MODE (0x0 << 7)
#define AT_XDMAC_CC_MEMSET_HW_MODE (0x1 << 7)
#define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */
#define AT_XDMAC_CC_DWIDTH_OFFSET 11
#define AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
#define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */
#define AT_XDMAC_CC_DWIDTH_BYTE 0x0
#define AT_XDMAC_CC_DWIDTH_HALFWORD 0x1
#define AT_XDMAC_CC_DWIDTH_WORD 0x2
#define AT_XDMAC_CC_DWIDTH_DWORD 0x3
#define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */
#define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */
#define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */
#define AT_XDMAC_CC_SAM_FIXED_AM (0x0 << 16)
#define AT_XDMAC_CC_SAM_INCREMENTED_AM (0x1 << 16)
#define AT_XDMAC_CC_SAM_UBS_AM (0x2 << 16)
#define AT_XDMAC_CC_SAM_UBS_DS_AM (0x3 << 16)
#define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */
#define AT_XDMAC_CC_DAM_FIXED_AM (0x0 << 18)
#define AT_XDMAC_CC_DAM_INCREMENTED_AM (0x1 << 18)
#define AT_XDMAC_CC_DAM_UBS_AM (0x2 << 18)
#define AT_XDMAC_CC_DAM_UBS_DS_AM (0x3 << 18)
#define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */
#define AT_XDMAC_CC_INITD_TERMINATED (0x0 << 21)
#define AT_XDMAC_CC_INITD_IN_PROGRESS (0x1 << 21)
#define AT_XDMAC_CC_RDIP (0x1 << 22) /* Read in Progress (read only) */
#define AT_XDMAC_CC_RDIP_DONE (0x0 << 22)
#define AT_XDMAC_CC_RDIP_IN_PROGRESS (0x1 << 22)
#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
#define AT_XDMAC_CC_PERID(i) (0x7f & (i) << 24) /* Channel Peripheral Identifier */
#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
#define AT_XDMAC_CHAN_REG_BASE 0x50 /* Channel registers base address */

/* Microblock control members */
#define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */
#define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */
#define AT_XDMAC_MBR_UBC_NSEN (0x1 << 25) /* Next Descriptor Source Update */
#define AT_XDMAC_MBR_UBC_NDEN (0x1 << 26) /* Next Descriptor Destination Update */
#define AT_XDMAC_MBR_UBC_NDV0 (0x0 << 27) /* Next Descriptor View 0 */
#define AT_XDMAC_MBR_UBC_NDV1 (0x1 << 27) /* Next Descriptor View 1 */
#define AT_XDMAC_MBR_UBC_NDV2 (0x2 << 27) /* Next Descriptor View 2 */
#define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */

#define AT_XDMAC_MAX_CHAN 0x20
#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */

#define AT_XDMAC_DMA_BUSWIDTHS \
        (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
         BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
         BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
         BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
         BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum atc_status {
        AT_XDMAC_CHAN_IS_CYCLIC = 0,
        AT_XDMAC_CHAN_IS_PAUSED,
};

/* ----- Channels ----- */
struct at_xdmac_chan {
        struct dma_chan chan;
        void __iomem *ch_regs;
        u32 mask; /* Channel Mask */
        u32 cfg; /* Channel Configuration Register */
        u8 perid; /* Peripheral ID */
        u8 perif; /* Peripheral Interface */
        u8 memif; /* Memory Interface */
        u32 save_cc;
        u32 save_cim;
        u32 save_cnda;
        u32 save_cndc;
        unsigned long status;
        struct tasklet_struct tasklet;
        struct dma_slave_config sconfig;
        spinlock_t lock;
        struct list_head xfers_list;
        struct list_head free_descs_list;
};

/* ----- Controller ----- */
struct at_xdmac {
        struct dma_device dma;
        void __iomem *regs;
        int irq;
        struct clk *clk;
        u32 save_gim;
        u32 save_gs;
        struct dma_pool *at_xdmac_desc_pool;
        struct at_xdmac_chan chan[0];
};

/* ----- Descriptors ----- */

/* Linked List Descriptor */
struct at_xdmac_lld {
        dma_addr_t mbr_nda; /* Next Descriptor Member */
        u32 mbr_ubc; /* Microblock Control Member */
        dma_addr_t mbr_sa; /* Source Address Member */
        dma_addr_t mbr_da; /* Destination Address Member */
        u32 mbr_cfg; /* Configuration Register */
        u32 mbr_bc; /* Block Control Register */
        u32 mbr_ds; /* Data Stride Register */
        u32 mbr_sus; /* Source Microblock Stride Register */
        u32 mbr_dus; /* Destination Microblock Stride Register */
};

struct at_xdmac_desc {
        struct at_xdmac_lld lld;
        enum dma_transfer_direction direction;
        struct dma_async_tx_descriptor tx_dma_desc;
        struct list_head desc_node;
        /* Following members are only used by the first descriptor */
        bool active_xfer;
        unsigned int xfer_size;
        struct list_head descs_list;
        struct list_head xfer_node;
};

static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
        return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
}
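
/*
 * Example (illustrative): with AT_XDMAC_CHAN_REG_BASE at 0x50 and a 0x40
 * stride per channel, channel 0 registers start at offset 0x50, channel 1
 * at 0x90 and channel 2 at 0xD0 from the controller base.
 */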

#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
#define at_xdmac_write(atxdmac, reg, value) \
        writel_relaxed((value), (atxdmac)->regs + (reg))

#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))

static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
{
        return container_of(dchan, struct at_xdmac_chan, chan);
}

static struct device *chan2dev(struct dma_chan *chan)
{
        return &chan->dev->device;
}

static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
{
        return container_of(ddev, struct at_xdmac, dma);
}

static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
{
        return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
}

static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
{
        return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
}

static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
{
        return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}

static inline int at_xdmac_csize(u32 maxburst)
{
        int csize;

        csize = ffs(maxburst) - 1;
        if (csize > 4)
                csize = -EINVAL;

        return csize;
};
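
/*
 * Example (illustrative): ffs() maps a power-of-two maxburst to its CSIZE
 * encoding: 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3, 16 -> 4. A maxburst above 16
 * (AT_XDMAC_MAX_CSIZE) would give csize > 4 and is rejected with -EINVAL.
 */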

static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
        return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
};

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
                 "initial descriptors per channel (default: 64)");

static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
        return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
}

static void at_xdmac_off(struct at_xdmac *atxdmac)
{
        at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
        /* Wait until all channels are disabled. */
        while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
                cpu_relax();

        at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
}

/* Call with lock held. */
static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
                                struct at_xdmac_desc *first)
{
        struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
        u32 reg;

        dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);

        if (at_xdmac_chan_is_enabled(atchan))
                return;

        /* Set transfer as active to not try to start it again. */
        first->active_xfer = true;

        /* Tell xdmac where to get the first descriptor. */
        reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
              | AT_XDMAC_CNDA_NDAIF(atchan->memif);
        at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);

        /*
         * When doing non-cyclic transfers we need to use next descriptor
         * view 2 (or higher), since some fields of the configuration
         * register depend on the transfer size and src/dest addresses.
         */
        if (at_xdmac_chan_is_cyclic(atchan))
                reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
        else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
                reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
        else
                reg = AT_XDMAC_CNDC_NDVIEW_NDV2;

        /*
         * Even if the register will be updated from the configuration in the
         * descriptor when using view 2 or higher, the PROT bit won't be set
         * properly. This bit can be modified only by using the channel
         * configuration register.
         */
        at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);

        reg |= AT_XDMAC_CNDC_NDDUP
               | AT_XDMAC_CNDC_NDSUP
               | AT_XDMAC_CNDC_NDE;
        at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);

        dev_vdbg(chan2dev(&atchan->chan),
                 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
                 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

        at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
        reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
        /*
         * There is no end of list when doing cyclic DMA, so we need to get
         * an interrupt after each period.
         */
        if (at_xdmac_chan_is_cyclic(atchan))
                at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
                                    reg | AT_XDMAC_CIE_BIE);
        else
                at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
                                    reg | AT_XDMAC_CIE_LIE);
        at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
        dev_vdbg(chan2dev(&atchan->chan),
                 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
        wmb();
        at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);

        dev_vdbg(chan2dev(&atchan->chan),
                 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
                 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
}

static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct at_xdmac_desc *desc = txd_to_at_desc(tx);
        struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
        dma_cookie_t cookie;
        unsigned long irqflags;

        spin_lock_irqsave(&atchan->lock, irqflags);
        cookie = dma_cookie_assign(tx);

        dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
                 __func__, atchan, desc);
        list_add_tail(&desc->xfer_node, &atchan->xfers_list);
        if (list_is_singular(&atchan->xfers_list))
                at_xdmac_start_xfer(atchan, desc);

        spin_unlock_irqrestore(&atchan->lock, irqflags);
        return cookie;
}

static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
                                                 gfp_t gfp_flags)
{
        struct at_xdmac_desc *desc;
        struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
        dma_addr_t phys;

        desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
        if (desc) {
                memset(desc, 0, sizeof(*desc));
                INIT_LIST_HEAD(&desc->descs_list);
                dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
                desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
                desc->tx_dma_desc.phys = phys;
        }

        return desc;
}

static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
{
        memset(&desc->lld, 0, sizeof(desc->lld));
        INIT_LIST_HEAD(&desc->descs_list);
        desc->direction = DMA_TRANS_NONE;
        desc->xfer_size = 0;
        desc->active_xfer = false;
}

/* Call must be protected by lock. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
        struct at_xdmac_desc *desc;

        if (list_empty(&atchan->free_descs_list)) {
                desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
        } else {
                desc = list_first_entry(&atchan->free_descs_list,
                                        struct at_xdmac_desc, desc_node);
                list_del(&desc->desc_node);
                at_xdmac_init_used_desc(desc);
        }

        return desc;
}

static void at_xdmac_queue_desc(struct dma_chan *chan,
                                struct at_xdmac_desc *prev,
                                struct at_xdmac_desc *desc)
{
        if (!prev || !desc)
                return;

        prev->lld.mbr_nda = desc->tx_dma_desc.phys;
        prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;

        dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
                __func__, prev, &prev->lld.mbr_nda);
}

static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
                                                  struct at_xdmac_desc *desc)
{
        if (!desc)
                return;

        desc->lld.mbr_bc++;

        dev_dbg(chan2dev(chan),
                "%s: incrementing the block count of the desc 0x%p\n",
                __func__, desc);
}

static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
                                       struct of_dma *of_dma)
{
        struct at_xdmac *atxdmac = of_dma->of_dma_data;
        struct at_xdmac_chan *atchan;
        struct dma_chan *chan;
        struct device *dev = atxdmac->dma.dev;

        if (dma_spec->args_count != 1) {
                dev_err(dev, "dma phandler args: bad number of args\n");
                return NULL;
        }

        chan = dma_get_any_slave_channel(&atxdmac->dma);
        if (!chan) {
                dev_err(dev, "can't get a dma channel\n");
                return NULL;
        }

        atchan = to_at_xdmac_chan(chan);
        atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
        atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
        atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
        dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
                atchan->memif, atchan->perif, atchan->perid);

        return chan;
}

static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
                                      enum dma_transfer_direction direction)
{
        struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
        int csize, dwidth;

        if (direction == DMA_DEV_TO_MEM) {
                atchan->cfg =
                        AT91_XDMAC_DT_PERID(atchan->perid)
                        | AT_XDMAC_CC_DAM_INCREMENTED_AM
                        | AT_XDMAC_CC_SAM_FIXED_AM
                        | AT_XDMAC_CC_DIF(atchan->memif)
                        | AT_XDMAC_CC_SIF(atchan->perif)
                        | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
                        | AT_XDMAC_CC_DSYNC_PER2MEM
                        | AT_XDMAC_CC_MBSIZE_SIXTEEN
                        | AT_XDMAC_CC_TYPE_PER_TRAN;
                csize = ffs(atchan->sconfig.src_maxburst) - 1;
                if (csize < 0) {
                        dev_err(chan2dev(chan), "invalid src maxburst value\n");
                        return -EINVAL;
                }
                atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
                dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
                if (dwidth < 0) {
                        dev_err(chan2dev(chan), "invalid src addr width value\n");
                        return -EINVAL;
                }
                atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
        } else if (direction == DMA_MEM_TO_DEV) {
                atchan->cfg =
                        AT91_XDMAC_DT_PERID(atchan->perid)
                        | AT_XDMAC_CC_DAM_FIXED_AM
                        | AT_XDMAC_CC_SAM_INCREMENTED_AM
                        | AT_XDMAC_CC_DIF(atchan->perif)
                        | AT_XDMAC_CC_SIF(atchan->memif)
                        | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
                        | AT_XDMAC_CC_DSYNC_MEM2PER
                        | AT_XDMAC_CC_MBSIZE_SIXTEEN
                        | AT_XDMAC_CC_TYPE_PER_TRAN;
                csize = ffs(atchan->sconfig.dst_maxburst) - 1;
                if (csize < 0) {
                        dev_err(chan2dev(chan), "invalid dst maxburst value\n");
                        return -EINVAL;
                }
                atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
                dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
                if (dwidth < 0) {
                        dev_err(chan2dev(chan), "invalid dst addr width value\n");
                        return -EINVAL;
                }
                atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
        }

        dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);

        return 0;
}

/*
 * Only check that the maxburst and addr width values are supported by the
 * controller; do not check that the configuration is valid for the transfer,
 * since we don't know the direction at this stage.
 */
static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
{
        if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
            || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
                return -EINVAL;

        if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
            || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
                return -EINVAL;

        return 0;
}

static int at_xdmac_set_slave_config(struct dma_chan *chan,
                                     struct dma_slave_config *sconfig)
{
        struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

        if (at_xdmac_check_slave_config(sconfig)) {
                dev_err(chan2dev(chan), "invalid slave configuration\n");
                return -EINVAL;
        }

        memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));

        return 0;
}
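
/*
 * Example (illustrative sketch of a hypothetical client, not part of this
 * driver): a peripheral driver would typically configure the channel before
 * preparing transfers:
 *
 *      struct dma_slave_config cfg = {
 *              .direction = DMA_DEV_TO_MEM,
 *              .src_addr = rx_fifo_phys,       // hypothetical FIFO address
 *              .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *              .src_maxburst = 8,
 *      };
 *
 *      ret = dmaengine_slave_config(chan, &cfg);
 *
 * at_xdmac_check_slave_config() only validates that maxburst (<= 16) and
 * address width (<= 8 bytes) are within the controller limits.
 */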

static struct dma_async_tx_descriptor *
at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                       unsigned int sg_len, enum dma_transfer_direction direction,
                       unsigned long flags, void *context)
{
        struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac_desc *first = NULL, *prev = NULL;
        struct scatterlist *sg;
        int i;
        unsigned int xfer_size = 0;
        unsigned long irqflags;
        struct dma_async_tx_descriptor *ret = NULL;

        if (!sgl)
                return NULL;

        if (!is_slave_direction(direction)) {
                dev_err(chan2dev(chan), "invalid DMA direction\n");
                return NULL;
        }

        dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
                __func__, sg_len,
                direction == DMA_MEM_TO_DEV ? "to device" : "from device",
                flags);

        /* Protect dma_sconfig field that can be modified by set_slave_conf. */
        spin_lock_irqsave(&atchan->lock, irqflags);

        if (at_xdmac_compute_chan_conf(chan, direction))
                goto spin_unlock;

        /* Prepare descriptors. */
        for_each_sg(sgl, sg, sg_len, i) {
                struct at_xdmac_desc *desc = NULL;
                u32 len, mem, dwidth, fixed_dwidth;

                len = sg_dma_len(sg);
                mem = sg_dma_address(sg);
                if (unlikely(!len)) {
                        dev_err(chan2dev(chan), "sg data length is zero\n");
                        goto spin_unlock;
                }
                dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
                        __func__, i, len, mem);

                desc = at_xdmac_get_desc(atchan);
                if (!desc) {
                        dev_err(chan2dev(chan), "can't get descriptor\n");
                        if (first)
                                list_splice_init(&first->descs_list, &atchan->free_descs_list);
                        goto spin_unlock;
                }

                /* Linked list descriptor setup. */
                if (direction == DMA_DEV_TO_MEM) {
                        desc->lld.mbr_sa = atchan->sconfig.src_addr;
                        desc->lld.mbr_da = mem;
                } else {
                        desc->lld.mbr_sa = mem;
                        desc->lld.mbr_da = atchan->sconfig.dst_addr;
                }
                dwidth = at_xdmac_get_dwidth(atchan->cfg);
                fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
                               ? dwidth
                               : AT_XDMAC_CC_DWIDTH_BYTE;
                desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2       /* next descriptor view */
                        | AT_XDMAC_MBR_UBC_NDEN                 /* next descriptor dst parameter update */
                        | AT_XDMAC_MBR_UBC_NSEN                 /* next descriptor src parameter update */
                        | (len >> fixed_dwidth);                /* microblock length */
                desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
                                    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
                dev_dbg(chan2dev(chan),
                        "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
                        __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

                /* Chain lld. */
                if (prev)
                        at_xdmac_queue_desc(chan, prev, desc);

                prev = desc;
                if (!first)
                        first = desc;

                dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
                        __func__, desc, first);
                list_add_tail(&desc->desc_node, &first->descs_list);
                xfer_size += len;
        }

        first->tx_dma_desc.flags = flags;
        first->xfer_size = xfer_size;
        first->direction = direction;
        ret = &first->tx_dma_desc;

spin_unlock:
        spin_unlock_irqrestore(&atchan->lock, irqflags);
        return ret;
}
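
/*
 * Example (illustrative): with a configured data width of 4 bytes, a
 * 512-byte sg entry is word aligned, so fixed_dwidth stays at word size and
 * the microblock length is 512 >> 2 = 128. A 514-byte entry is not, so the
 * descriptor falls back to byte width with a microblock length of 514.
 */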

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
                         size_t buf_len, size_t period_len,
                         enum dma_transfer_direction direction,
                         unsigned long flags)
{
        struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac_desc *first = NULL, *prev = NULL;
        unsigned int periods = buf_len / period_len;
        int i;
        unsigned long irqflags;

        dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
                __func__, &buf_addr, buf_len, period_len,
                direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);

        if (!is_slave_direction(direction)) {
                dev_err(chan2dev(chan), "invalid DMA direction\n");
                return NULL;
        }

        if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
                dev_err(chan2dev(chan), "channel currently used\n");
                return NULL;
        }

        if (at_xdmac_compute_chan_conf(chan, direction))
                return NULL;

        for (i = 0; i < periods; i++) {
                struct at_xdmac_desc *desc = NULL;

                spin_lock_irqsave(&atchan->lock, irqflags);
                desc = at_xdmac_get_desc(atchan);
                if (!desc) {
                        dev_err(chan2dev(chan), "can't get descriptor\n");
                        if (first)
                                list_splice_init(&first->descs_list, &atchan->free_descs_list);
                        spin_unlock_irqrestore(&atchan->lock, irqflags);
                        return NULL;
                }
                spin_unlock_irqrestore(&atchan->lock, irqflags);
                dev_dbg(chan2dev(chan),
                        "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
                        __func__, desc, &desc->tx_dma_desc.phys);

                if (direction == DMA_DEV_TO_MEM) {
                        desc->lld.mbr_sa = atchan->sconfig.src_addr;
                        desc->lld.mbr_da = buf_addr + i * period_len;
                } else {
                        desc->lld.mbr_sa = buf_addr + i * period_len;
                        desc->lld.mbr_da = atchan->sconfig.dst_addr;
                }
                desc->lld.mbr_cfg = atchan->cfg;
                desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
                        | AT_XDMAC_MBR_UBC_NDEN
                        | AT_XDMAC_MBR_UBC_NSEN
                        | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);

                dev_dbg(chan2dev(chan),
                        "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
                        __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

                /* Chain lld. */
                if (prev)
                        at_xdmac_queue_desc(chan, prev, desc);

                prev = desc;
                if (!first)
                        first = desc;

                dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
                        __func__, desc, first);
                list_add_tail(&desc->desc_node, &first->descs_list);
        }

        at_xdmac_queue_desc(chan, prev, first);
        first->tx_dma_desc.flags = flags;
        first->xfer_size = buf_len;
        first->direction = direction;

        return &first->tx_dma_desc;
}
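
/*
 * Example (illustrative sketch of a hypothetical client, not part of this
 * driver): cyclic transfers are typically used for audio, with one
 * interrupt per period:
 *
 *      desc = dmaengine_prep_dma_cyclic(chan, buf_phys, 4 * period_sz,
 *                                       period_sz, DMA_MEM_TO_DEV,
 *                                       DMA_PREP_INTERRUPT);
 *
 * buf_phys and period_sz are hypothetical. The buffer is split into
 * buf_len / period_len descriptors, and the at_xdmac_queue_desc(chan, prev,
 * first) call above links the last one back to the first to close the ring.
 */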

static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
{
        u32 width;

        /*
         * Check address alignment to select the greatest data width we
         * can use.
         *
         * Some XDMAC implementations don't provide dword transfer; in
         * this case selecting dword has the same behavior as
         * selecting word transfers.
         */
        if (!(addr & 7)) {
                width = AT_XDMAC_CC_DWIDTH_DWORD;
                dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
        } else if (!(addr & 3)) {
                width = AT_XDMAC_CC_DWIDTH_WORD;
                dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
        } else if (!(addr & 1)) {
                width = AT_XDMAC_CC_DWIDTH_HALFWORD;
                dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
        } else {
                width = AT_XDMAC_CC_DWIDTH_BYTE;
                dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
        }

        return width;
}
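
/*
 * Example (illustrative): 0x1008 has the three low bits clear, so
 * double-word width is selected; 0x100C only allows word width, 0x1006
 * halfword width, and 0x1007 forces byte width.
 */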

static struct at_xdmac_desc *
at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
                                struct at_xdmac_chan *atchan,
                                struct at_xdmac_desc *prev,
                                dma_addr_t src, dma_addr_t dst,
                                struct dma_interleaved_template *xt,
                                struct data_chunk *chunk)
{
        struct at_xdmac_desc *desc;
        u32 dwidth;
        unsigned long flags;
        size_t ublen;
        /*
         * WARNING: The channel configuration is set here since there is no
         * dmaengine_slave_config call in this case. Moreover, we don't know
         * the direction, which means we can't dynamically set the source and
         * dest interface, so we have to use the same one. Only interface 0
         * allows EBI access. Hopefully we can access DDR through both ports
         * (at least on SAMA5D4x), so we can use the same interface for source
         * and dest, which solves the fact that we don't know the direction.
         */
        u32 chan_cc = AT_XDMAC_CC_DIF(0)
                | AT_XDMAC_CC_SIF(0)
                | AT_XDMAC_CC_MBSIZE_SIXTEEN
                | AT_XDMAC_CC_TYPE_MEM_TRAN;

        dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
        if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
                dev_dbg(chan2dev(chan),
                        "%s: chunk too big (%zu, max size %lu)...\n",
                        __func__, chunk->size,
                        AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
                return NULL;
        }

        if (prev)
                dev_dbg(chan2dev(chan),
                        "Adding items at the end of desc 0x%p\n", prev);

        if (xt->src_inc) {
                if (xt->src_sgl)
                        chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
                else
                        chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
        }

        if (xt->dst_inc) {
                if (xt->dst_sgl)
                        chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
                else
                        chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
        }

        spin_lock_irqsave(&atchan->lock, flags);
        desc = at_xdmac_get_desc(atchan);
        spin_unlock_irqrestore(&atchan->lock, flags);
        if (!desc) {
                dev_err(chan2dev(chan), "can't get descriptor\n");
                return NULL;
        }

        chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

        ublen = chunk->size >> dwidth;

        desc->lld.mbr_sa = src;
        desc->lld.mbr_da = dst;
        desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
        desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);
        desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
                | AT_XDMAC_MBR_UBC_NDEN
                | AT_XDMAC_MBR_UBC_NSEN
                | ublen;
        desc->lld.mbr_cfg = chan_cc;

        dev_dbg(chan2dev(chan),
                "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
                __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
                desc->lld.mbr_ubc, desc->lld.mbr_cfg);

        /* Chain lld. */
        if (prev)
                at_xdmac_queue_desc(chan, prev, desc);

        return desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_interleaved(struct dma_chan *chan,
                          struct dma_interleaved_template *xt,
                          unsigned long flags)
{
        struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac_desc *prev = NULL, *first = NULL;
        struct data_chunk *chunk, *prev_chunk = NULL;
        dma_addr_t dst_addr, src_addr;
        size_t dst_skip, src_skip, len = 0;
        size_t prev_dst_icg = 0, prev_src_icg = 0;
        int i;

        if (!xt || (xt->numf != 1) || (xt->dir != DMA_MEM_TO_MEM))
                return NULL;

        dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
                __func__, &xt->src_start, &xt->dst_start, xt->numf,
                xt->frame_size, flags);

        src_addr = xt->src_start;
        dst_addr = xt->dst_start;

        for (i = 0; i < xt->frame_size; i++) {
                struct at_xdmac_desc *desc;
                size_t src_icg, dst_icg;

                chunk = xt->sgl + i;

                dst_icg = dmaengine_get_dst_icg(xt, chunk);
                src_icg = dmaengine_get_src_icg(xt, chunk);

                src_skip = chunk->size + src_icg;
                dst_skip = chunk->size + dst_icg;

                dev_dbg(chan2dev(chan),
                        "%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
                        __func__, chunk->size, src_icg, dst_icg);

                /*
                 * Handle the case where we just have the same
                 * transfer to setup: we can just increase the
                 * block number and reuse the same descriptor.
                 */
                if (prev_chunk && prev &&
                    (prev_chunk->size == chunk->size) &&
                    (prev_src_icg == src_icg) &&
                    (prev_dst_icg == dst_icg)) {
                        dev_dbg(chan2dev(chan),
                                "%s: same configuration as the previous chunk, merging the descriptors...\n",
                                __func__);
                        at_xdmac_increment_block_count(chan, prev);
                        continue;
                }

                desc = at_xdmac_interleaved_queue_desc(chan, atchan,
                                                       prev,
                                                       src_addr, dst_addr,
                                                       xt, chunk);
                if (!desc) {
                        list_splice_init(&first->descs_list,
                                         &atchan->free_descs_list);
                        return NULL;
                }

                if (!first)
                        first = desc;

                dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
                        __func__, desc, first);
                list_add_tail(&desc->desc_node, &first->descs_list);

                if (xt->src_sgl)
                        src_addr += src_skip;

                if (xt->dst_sgl)
                        dst_addr += dst_skip;

                len += chunk->size;
                prev_chunk = chunk;
                prev_dst_icg = dst_icg;
                prev_src_icg = src_icg;
                prev = desc;
        }

        first->tx_dma_desc.cookie = -EBUSY;
        first->tx_dma_desc.flags = flags;
        first->xfer_size = len;

        return &first->tx_dma_desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                         size_t len, unsigned long flags)
{
        struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac_desc *first = NULL, *prev = NULL;
        size_t remaining_size = len, xfer_size = 0, ublen;
        dma_addr_t src_addr = src, dst_addr = dest;
        u32 dwidth;
        /*
         * WARNING: We don't know the direction, which means we can't
         * dynamically set the source and dest interface, so we have to use
         * the same one. Only interface 0 allows EBI access. Hopefully we can
         * access DDR through both ports (at least on SAMA5D4x), so we can use
         * the same interface for source and dest, which solves the fact that
         * we don't know the direction.
         */
        u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
                | AT_XDMAC_CC_SAM_INCREMENTED_AM
                | AT_XDMAC_CC_DIF(0)
                | AT_XDMAC_CC_SIF(0)
                | AT_XDMAC_CC_MBSIZE_SIXTEEN
                | AT_XDMAC_CC_TYPE_MEM_TRAN;
        unsigned long irqflags;

        dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
                __func__, &src, &dest, len, flags);

        if (unlikely(!len))
                return NULL;

        dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);

        /* Prepare descriptors. */
        while (remaining_size) {
                struct at_xdmac_desc *desc = NULL;

                dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);

                spin_lock_irqsave(&atchan->lock, irqflags);
                desc = at_xdmac_get_desc(atchan);
                spin_unlock_irqrestore(&atchan->lock, irqflags);
                if (!desc) {
                        dev_err(chan2dev(chan), "can't get descriptor\n");
                        if (first)
                                list_splice_init(&first->descs_list, &atchan->free_descs_list);
                        return NULL;
                }

                /* Update src and dest addresses. */
                src_addr += xfer_size;
                dst_addr += xfer_size;

                if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
                        xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
                else
                        xfer_size = remaining_size;

                dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);

                /* Check remaining length and change data width if needed. */
                dwidth = at_xdmac_align_width(chan,
                                              src_addr | dst_addr | xfer_size);
                chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

                ublen = xfer_size >> dwidth;
                remaining_size -= xfer_size;

                desc->lld.mbr_sa = src_addr;
                desc->lld.mbr_da = dst_addr;
                desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
                        | AT_XDMAC_MBR_UBC_NDEN
                        | AT_XDMAC_MBR_UBC_NSEN
                        | ublen;
                desc->lld.mbr_cfg = chan_cc;

                dev_dbg(chan2dev(chan),
                        "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
                        __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);

                /* Chain lld. */
                if (prev)
                        at_xdmac_queue_desc(chan, prev, desc);

                prev = desc;
                if (!first)
                        first = desc;

                dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
                        __func__, desc, first);
                list_add_tail(&desc->desc_node, &first->descs_list);
        }

        first->tx_dma_desc.flags = flags;
        first->xfer_size = len;

        return &first->tx_dma_desc;
}
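
/*
 * Example (illustrative sketch of a hypothetical client, not part of this
 * driver):
 *
 *      desc = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len, 0);
 *      cookie = dmaengine_submit(desc);
 *      dma_async_issue_pending(chan);
 *
 * dst_phys and src_phys are hypothetical DMA addresses. Transfers larger
 * than AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth are split by the loop above
 * into a chained list of microblock descriptors.
 */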

static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
                                                         struct at_xdmac_chan *atchan,
                                                         dma_addr_t dst_addr,
                                                         size_t len,
                                                         int value)
{
        struct at_xdmac_desc *desc;
        unsigned long flags;
        size_t ublen;
        u32 dwidth;
        /*
         * WARNING: The channel configuration is set here since there is no
         * dmaengine_slave_config call in this case. Moreover, we don't know
         * the direction, which means we can't dynamically set the source and
         * dest interface, so we have to use the same one. Only interface 0
         * allows EBI access. Hopefully we can access DDR through both ports
         * (at least on SAMA5D4x), so we can use the same interface for source
         * and dest, which solves the fact that we don't know the direction.
         */
        u32 chan_cc = AT_XDMAC_CC_DAM_UBS_AM
                | AT_XDMAC_CC_SAM_INCREMENTED_AM
                | AT_XDMAC_CC_DIF(0)
                | AT_XDMAC_CC_SIF(0)
                | AT_XDMAC_CC_MBSIZE_SIXTEEN
                | AT_XDMAC_CC_MEMSET_HW_MODE
                | AT_XDMAC_CC_TYPE_MEM_TRAN;

        dwidth = at_xdmac_align_width(chan, dst_addr);

        if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
                dev_err(chan2dev(chan),
                        "%s: Transfer too large, aborting...\n",
                        __func__);
                return NULL;
        }

        spin_lock_irqsave(&atchan->lock, flags);
        desc = at_xdmac_get_desc(atchan);
        spin_unlock_irqrestore(&atchan->lock, flags);
        if (!desc) {
                dev_err(chan2dev(chan), "can't get descriptor\n");
                return NULL;
        }

        chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

        ublen = len >> dwidth;

        desc->lld.mbr_da = dst_addr;
        desc->lld.mbr_ds = value;
        desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
                | AT_XDMAC_MBR_UBC_NDEN
                | AT_XDMAC_MBR_UBC_NSEN
                | ublen;
        desc->lld.mbr_cfg = chan_cc;

        dev_dbg(chan2dev(chan),
                "%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
                __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
                desc->lld.mbr_cfg);

        return desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
                         size_t len, unsigned long flags)
{
        struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac_desc *desc;

        dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
                __func__, &dest, len, value, flags);

        if (unlikely(!len))
                return NULL;

        desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
        if (!desc)
                return NULL;
        list_add_tail(&desc->desc_node, &desc->descs_list);

        desc->tx_dma_desc.cookie = -EBUSY;
        desc->tx_dma_desc.flags = flags;
        desc->xfer_size = len;

        return &desc->tx_dma_desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
                            unsigned int sg_len, int value,
                            unsigned long flags)
{
        struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac_desc *desc, *pdesc = NULL,
                             *ppdesc = NULL, *first = NULL;
        struct scatterlist *sg, *psg = NULL, *ppsg = NULL;
        size_t stride = 0, pstride = 0, len = 0;
        int i;

        if (!sgl)
                return NULL;

        dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
                __func__, sg_len, value, flags);

        /* Prepare descriptors. */
        for_each_sg(sgl, sg, sg_len, i) {
                dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
                        __func__, &sg_dma_address(sg), sg_dma_len(sg),
                        value, flags);
                desc = at_xdmac_memset_create_desc(chan, atchan,
                                                   sg_dma_address(sg),
                                                   sg_dma_len(sg),
                                                   value);
                if (!desc) {
                        if (first)
                                list_splice_init(&first->descs_list,
                                                 &atchan->free_descs_list);
                        return NULL;
                }

                if (!first)
                        first = desc;

                /* Update our strides */
                pstride = stride;
                if (psg)
                        stride = sg_dma_address(sg) -
                                 (sg_dma_address(psg) + sg_dma_len(psg));

                /*
                 * The scatterlist API gives us only the address and
                 * length of each element.
                 *
                 * Unfortunately, we don't have the stride, which we
                 * will need to compute.
                 *
                 * That makes us end up in a situation like this one:
                 *    len    stride    len    stride    len
                 * +-------+        +-------+        +-------+
                 * |  N-2  |        |  N-1  |        |   N   |
                 * +-------+        +-------+        +-------+
                 *
                 * We need all these three elements (N-2, N-1 and N)
                 * to actually take the decision on whether we need to
                 * queue N-1 or reuse N-2.
                 *
                 * We will only consider N if it is the last element.
                 */
		if (ppdesc && pdesc) {
			if ((stride == pstride) &&
			    (sg_dma_len(ppsg) == sg_dma_len(psg))) {
				dev_dbg(chan2dev(chan),
					"%s: desc 0x%p can be merged with desc 0x%p\n",
					__func__, pdesc, ppdesc);

				/*
				 * Increment the block count of the
				 * N-2 descriptor
				 */
				at_xdmac_increment_block_count(chan, ppdesc);
				ppdesc->lld.mbr_dus = stride;

				/*
				 * Put the N-1 descriptor back in the
				 * free descriptor list
				 */
				list_add_tail(&pdesc->desc_node,
					      &atchan->free_descs_list);

				/*
				 * Make our N-1 descriptor pointer
				 * point to the N-2 since they were
				 * actually merged.
				 */
				pdesc = ppdesc;

			/*
			 * Rule out the case where we don't have
			 * pstride computed yet (our second sg
			 * element).
			 *
			 * We also want to catch the case where the
			 * stride would be negative.
			 */
			} else if (pstride ||
				   sg_dma_address(sg) < sg_dma_address(psg)) {
				/*
				 * Queue the N-1 descriptor after the
				 * N-2
				 */
				at_xdmac_queue_desc(chan, ppdesc, pdesc);

				/*
				 * Add the N-1 descriptor to the list
				 * of the descriptors used for this
				 * transfer
				 */
				list_add_tail(&desc->desc_node,
					      &first->descs_list);
				dev_dbg(chan2dev(chan),
					"%s: add desc 0x%p to descs_list 0x%p\n",
					__func__, desc, first);
			}
		}

		/*
		 * If this is the last element, just check whether it has
		 * the same size as the previous element.
		 *
		 * If so, we can merge it with the previous descriptor
		 * since we don't care about the stride anymore.
		 */
		if ((i == (sg_len - 1)) &&
		    sg_dma_len(ppsg) == sg_dma_len(psg)) {
			dev_dbg(chan2dev(chan),
				"%s: desc 0x%p can be merged with desc 0x%p\n",
				__func__, desc, pdesc);

			/*
			 * Increment the block count of the N-1
			 * descriptor
			 */
			at_xdmac_increment_block_count(chan, pdesc);
			pdesc->lld.mbr_dus = stride;

			/*
			 * Put the N descriptor back in the free
			 * descriptor list
			 */
			list_add_tail(&desc->desc_node,
				      &atchan->free_descs_list);
		}

		/* Update our descriptors */
		ppdesc = pdesc;
		pdesc = desc;

		/* Update our scatter pointers */
		ppsg = psg;
		psg = sg;

		len += sg_dma_len(sg);
	}

	first->tx_dma_desc.cookie = -EBUSY;
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}
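
/*
 * Illustrative sketch, not part of the driver: the merge criterion used in
 * the loop above, expressed as a standalone predicate (the helper name is
 * ours). Two consecutive scatterlist elements can be folded into the same
 * descriptor, by incrementing its block count, when they have the same
 * length and the gap between them repeats the previous stride.
 */
static bool __maybe_unused
at_xdmac_sg_mergeable(struct scatterlist *ppsg, struct scatterlist *psg,
		      size_t pstride, size_t stride)
{
	return stride == pstride && sg_dma_len(ppsg) == sg_dma_len(psg);
}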
static enum dma_status
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc *desc, *_desc;
	struct list_head *descs_list;
	enum dma_status ret;
	int residue;
	u32 cur_nda, mask, value;
	u8 dwidth = 0;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return ret;

	spin_lock_irqsave(&atchan->lock, flags);

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);

	/*
	 * If the transfer has not been started yet, there is no need to
	 * compute the residue: it is simply the transfer length.
	 */
	if (!desc->active_xfer) {
		dma_set_residue(txstate, desc->xfer_size);
		goto spin_unlock;
	}

	residue = desc->xfer_size;

	/*
	 * Flush the FIFO: only relevant when the transfer is source
	 * peripheral synchronized.
	 */
	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
	if ((desc->lld.mbr_cfg & mask) == value) {
		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
			cpu_relax();
	}

	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;

	/*
	 * Remove the size of all microblocks already transferred, including
	 * the current one. Then add back the remaining size of the current
	 * microblock.
	 */
	descs_list = &desc->descs_list;
	list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
		residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
			break;
	}
	residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;

	dma_set_residue(txstate, residue);

	dev_dbg(chan2dev(chan),
		"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
		__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, flags);
	return ret;
}
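
/*
 * Worked example of the residue arithmetic above (made-up numbers): take a
 * 4096-byte transfer split into four 1024-byte microblocks with dwidth = 0,
 * i.e. one byte per data unit. If CNDA points at the third microblock and
 * CUBC reads 512, the loop subtracts 3 * 1024 from the 4096-byte transfer
 * size and the last step adds back the 512 units still pending:
 *
 *	residue = 4096 - 3072 + (512 << 0) = 1536
 *
 * i.e. the unfinished half of microblock 3 plus the whole of microblock 4.
 */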
/* Call must be protected by the channel lock. */
static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
				 struct at_xdmac_desc *desc)
{
	dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);

	/*
	 * Remove the transfer from the transfer list, then move the
	 * transfer descriptors into the free descriptors list.
	 */
	list_del(&desc->xfer_node);
	list_splice_init(&desc->descs_list, &atchan->free_descs_list);
}

static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);

	/*
	 * If the channel is enabled, do nothing: advance_work will be
	 * triggered again after the interrupt.
	 */
	if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
		desc = list_first_entry(&atchan->xfers_list,
					struct at_xdmac_desc,
					xfer_node);
		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
		if (!desc->active_xfer)
			at_xdmac_start_xfer(atchan, desc);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;
	struct dma_async_tx_descriptor *txd;

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
	txd = &desc->tx_dma_desc;

	if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
		txd->callback(txd->callback_param);
}

static void at_xdmac_tasklet(unsigned long data)
{
	struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data;
	struct at_xdmac_desc *desc;
	u32 error_mask;

	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
		__func__, atchan->status);

	error_mask = AT_XDMAC_CIS_RBEIS
		     | AT_XDMAC_CIS_WBEIS
		     | AT_XDMAC_CIS_ROIS;

	if (at_xdmac_chan_is_cyclic(atchan)) {
		at_xdmac_handle_cyclic(atchan);
	} else if ((atchan->status & AT_XDMAC_CIS_LIS)
		   || (atchan->status & error_mask)) {
		struct dma_async_tx_descriptor *txd;

		if (atchan->status & AT_XDMAC_CIS_RBEIS)
			dev_err(chan2dev(&atchan->chan), "read bus error!!!");
		if (atchan->status & AT_XDMAC_CIS_WBEIS)
			dev_err(chan2dev(&atchan->chan), "write bus error!!!");
		if (atchan->status & AT_XDMAC_CIS_ROIS)
			dev_err(chan2dev(&atchan->chan), "request overflow error!!!");

		spin_lock_bh(&atchan->lock);
		desc = list_first_entry(&atchan->xfers_list,
					struct at_xdmac_desc,
					xfer_node);
		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
		BUG_ON(!desc->active_xfer);

		txd = &desc->tx_dma_desc;

		at_xdmac_remove_xfer(atchan, desc);
		spin_unlock_bh(&atchan->lock);

		if (!at_xdmac_chan_is_cyclic(atchan)) {
			dma_cookie_complete(txd);
			if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
				txd->callback(txd->callback_param);
		}

		dma_run_dependencies(txd);

		at_xdmac_advance_work(atchan);
	}
}

static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
{
	struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id;
	struct at_xdmac_chan *atchan;
	u32 imr, status, pending;
	u32 chan_imr, chan_status;
	int i, ret = IRQ_NONE;

	do {
		imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
		status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
		pending = status & imr;

		dev_vdbg(atxdmac->dma.dev,
			 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
			 __func__, status, imr, pending);

		if (!pending)
			break;

		/* We have to find which channel has generated the interrupt. */
		for (i = 0; i < atxdmac->dma.chancnt; i++) {
			if (!((1 << i) & pending))
				continue;

			atchan = &atxdmac->chan[i];
			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
			atchan->status = chan_status & chan_imr;
			dev_vdbg(atxdmac->dma.dev,
				 "%s: chan%d: imr=0x%x, status=0x%x\n",
				 __func__, i, chan_imr, chan_status);
			dev_vdbg(chan2dev(&atchan->chan),
				 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
				 __func__,
				 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

			if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);

			tasklet_schedule(&atchan->tasklet);
			ret = IRQ_HANDLED;
		}
	} while (pending);

	return ret;
}
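
/*
 * A sketch of an equivalent formulation of the channel scan above, using
 * the bitops helper (pending copied into an unsigned long, since that is
 * what for_each_set_bit() expects):
 *
 *	unsigned long pending_bits = pending;
 *
 *	for_each_set_bit(i, &pending_bits, atxdmac->dma.chancnt) {
 *		atchan = &atxdmac->chan[i];
 *		...
 *	}
 *
 * The open-coded loop is kept as-is above; this only illustrates the intent.
 */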
static void at_xdmac_issue_pending(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);

	if (!at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_advance_work(atchan);

	return;
}

static int at_xdmac_device_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	int ret;
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	ret = at_xdmac_set_slave_config(chan, config);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return ret;
}

static int at_xdmac_device_pause(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
		return 0;

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
		cpu_relax();
	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int at_xdmac_device_resume(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	if (!at_xdmac_chan_is_paused(atchan)) {
		spin_unlock_irqrestore(&atchan->lock, flags);
		return 0;
	}

	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int at_xdmac_device_terminate_all(struct dma_chan *chan)
{
	struct at_xdmac_desc *desc, *_desc;
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
		cpu_relax();

	/* Cancel all pending transfers. */
	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
		at_xdmac_remove_xfer(atchan, desc);

	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *desc;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);

	if (at_xdmac_chan_is_enabled(atchan)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel enabled)\n");
		i = -EIO;
		goto spin_unlock;
	}

	if (!list_empty(&atchan->free_descs_list)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel not free from a previous use)\n");
		i = -EIO;
		goto spin_unlock;
	}

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
		if (!desc) {
			dev_warn(chan2dev(chan),
				 "only %d descriptors have been allocated\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &atchan->free_descs_list);
	}

	dma_cookie_init(chan);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, flags);
	return i;
}

static void at_xdmac_free_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
	struct at_xdmac_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
		list_del(&desc->desc_node);
		dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
	}

	return;
}
#ifdef CONFIG_PM
static int atmel_xdmac_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

		/* Wait for transfer completion, except in cyclic case. */
		if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}
#else
# define atmel_xdmac_prepare NULL
#endif

#ifdef CONFIG_PM_SLEEP
static int atmel_xdmac_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			if (!at_xdmac_chan_is_paused(atchan))
				at_xdmac_device_pause(chan);
			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
		}
	}
	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);

	at_xdmac_off(atxdmac);
	clk_disable_unprepare(atxdmac->clk);
	return 0;
}

static int atmel_xdmac_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
	struct at_xdmac_chan *atchan;
	struct dma_chan *chan, *_chan;
	int i;

	clk_prepare_enable(atxdmac->clk);

	/* Clear pending interrupts. */
	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		atchan = &atxdmac->chan[i];
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}

	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
	at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		atchan = to_at_xdmac_chan(chan);
		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
			wmb();
			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
		}
	}
	return 0;
}
#endif /* CONFIG_PM_SLEEP */
static int at_xdmac_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct at_xdmac *atxdmac;
	int irq, size, nr_channels, i, ret;
	void __iomem *base;
	u32 reg;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/*
	 * Read the number of xdmac channels directly: the read helper
	 * function can't be used since atxdmac is not yet allocated, and we
	 * need to know the number of channels to do the allocation.
	 */
	reg = readl_relaxed(base + AT_XDMAC_GTYPE);
	nr_channels = AT_XDMAC_NB_CH(reg);
	if (nr_channels > AT_XDMAC_MAX_CHAN) {
		dev_err(&pdev->dev, "invalid number of channels (%u)\n",
			nr_channels);
		return -EINVAL;
	}

	size = sizeof(*atxdmac);
	size += nr_channels * sizeof(struct at_xdmac_chan);
	atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!atxdmac) {
		dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
		return -ENOMEM;
	}

	atxdmac->regs = base;
	atxdmac->irq = irq;

	atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atxdmac->clk)) {
		dev_err(&pdev->dev, "can't get dma_clk\n");
		return PTR_ERR(atxdmac->clk);
	}

	/* Do not use devm resources here, to prevent races with the tasklet. */
	ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "can't request irq\n");
		return ret;
	}

	ret = clk_prepare_enable(atxdmac->clk);
	if (ret) {
		dev_err(&pdev->dev, "can't prepare or enable clock\n");
		goto err_free_irq;
	}

	atxdmac->at_xdmac_desc_pool =
		dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
				 sizeof(struct at_xdmac_desc), 4, 0);
	if (!atxdmac->at_xdmac_desc_pool) {
		dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
		ret = -ENOMEM;
		goto err_clk_disable;
	}

	dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
	/*
	 * Without DMA_PRIVATE the driver is not able to allocate more than
	 * one channel: the second allocation fails in private_candidate.
	 */
	dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
	atxdmac->dma.dev = &pdev->dev;
	atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
	atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
	atxdmac->dma.device_tx_status = at_xdmac_tx_status;
	atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
	atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
	atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
	atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
	atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset;
	atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg;
	atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
	atxdmac->dma.device_config = at_xdmac_device_config;
	atxdmac->dma.device_pause = at_xdmac_device_pause;
	atxdmac->dma.device_resume = at_xdmac_device_resume;
	atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all;
	atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	/* Disable all chans and interrupts. */
	at_xdmac_off(atxdmac);

	/* Init channels. */
	INIT_LIST_HEAD(&atxdmac->dma.channels);
	for (i = 0; i < nr_channels; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		atchan->chan.device = &atxdmac->dma;
		list_add_tail(&atchan->chan.device_node,
			      &atxdmac->dma.channels);

		atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
		atchan->mask = 1 << i;

		spin_lock_init(&atchan->lock);
		INIT_LIST_HEAD(&atchan->xfers_list);
		INIT_LIST_HEAD(&atchan->free_descs_list);
		tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
			     (unsigned long)atchan);

		/* Clear pending interrupts. */
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}
	platform_set_drvdata(pdev, atxdmac);

	ret = dma_async_device_register(&atxdmac->dma);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DMA engine device\n");
		goto err_clk_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 at_xdmac_xlate, atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "could not register of dma controller\n");
		goto err_dma_unregister;
	}

	dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
		 nr_channels, atxdmac->regs);

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&atxdmac->dma);
err_clk_disable:
	clk_disable_unprepare(atxdmac->clk);
err_free_irq:
	free_irq(atxdmac->irq, atxdmac->dma.dev);
	return ret;
}
static int at_xdmac_remove(struct platform_device *pdev)
{
	struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
	int i;

	at_xdmac_off(atxdmac);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&atxdmac->dma);
	clk_disable_unprepare(atxdmac->clk);

	synchronize_irq(atxdmac->irq);

	free_irq(atxdmac->irq, atxdmac->dma.dev);

	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		tasklet_kill(&atchan->tasklet);
		at_xdmac_free_chan_resources(&atchan->chan);
	}

	return 0;
}

static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
	.prepare	= atmel_xdmac_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
};

static const struct of_device_id atmel_xdmac_dt_ids[] = {
	{
		.compatible = "atmel,sama5d4-dma",
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
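
/*
 * A sketch of the kind of device tree node this driver binds to. The
 * compatible string and the "dma_clk" clock name match this driver; the
 * unit address, register size, interrupt specifier and clock phandle are
 * illustrative, not taken from a real board file:
 *
 *	dma1: dma-controller@f0004000 {
 *		compatible = "atmel,sama5d4-dma";
 *		reg = <0xf0004000 0x200>;
 *		interrupts = <50 4 0>;
 *		clocks = <&dma1_clk>;
 *		clock-names = "dma_clk";
 *		#dma-cells = <1>;
 *	};
 */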
static struct platform_driver at_xdmac_driver = {
	.probe		= at_xdmac_probe,
	.remove		= at_xdmac_remove,
	.driver = {
		.name		= "at_xdmac",
		.of_match_table	= of_match_ptr(atmel_xdmac_dt_ids),
		.pm		= &atmel_xdmac_dev_pm_ops,
	}
};

static int __init at_xdmac_init(void)
{
	return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
}
subsys_initcall(at_xdmac_init);

MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
MODULE_LICENSE("GPL");
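
/*
 * A minimal, hypothetical client sketch showing how a slave driver would
 * typically drive a channel exported by this controller through the generic
 * dmaengine API. Error handling is omitted, and names such as "rx",
 * fifo_phys_addr, buf_dma and done_fn are illustrative, not part of this
 * driver:
 *
 *	struct dma_chan *chan = dma_request_slave_channel(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 1,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);	// ends up in at_xdmac_device_config()
 *	txd = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					  DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	txd->callback = done_fn;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);		// ends up in at_xdmac_issue_pending()
 */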