
/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20

/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QER		0x80
#define SH_QEER		0x84
#define SH_QEECR	0x88
#define SH_QEESR	0x8c
#define SH_QSER		0x90
#define SH_QSECR	0x94
#define SH_SIZE		0x200

/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640
#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 shadow regions */
#define EDMA_PARM	0x4000	/* PaRAM entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

#define EDMA_DCHMAP	0x0100	/* 64 registers */

/* CCCFG register */
#define GET_NUM_DMACH(x)	(x & 0x7)		/* bits 0-2 */
#define GET_NUM_QDMACH(x)	((x & 0x70) >> 4)	/* bits 4-6 */
#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12)	/* bits 12-14 */
#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16)	/* bits 16-18 */
#define GET_NUM_REGN(x)		((x & 0x300000) >> 20)	/* bits 20-21 */
#define CHMAP_EXIST		BIT(24)
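
/*
 * The CCCFG fields above are encoded rather than literal counts. A
 * minimal sketch of how they are expanded when the controller is
 * probed (the exact expansion lives in the hardware-setup code later
 * in this file; treat this as illustrative):
 *
 *	u32 cccfg = edma_read(ecc, EDMA_CCCFG);
 *
 *	ecc->num_channels = BIT(GET_NUM_DMACH(cccfg) + 1);
 *	ecc->num_slots = BIT(GET_NUM_PAENTRY(cccfg) + 4);
 *	ecc->num_tc = GET_NUM_EVQUE(cccfg) + 1;
 *
 * For example, a raw NUM_PAENTRY value of 3 means BIT(3 + 4) = 128
 * PaRAM sets.
 */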

/* CCSTAT register */
#define EDMA_CCSTAT_ACTV	BIT(4)

/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

#define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
#define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
#define EDMA_CONT_PARAMS_ANY		 1001
#define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003

/* PaRAM slots are laid out like this */
struct edmacc_param {
	u32 opt;
	u32 src;
	u32 a_b_cnt;
	u32 dst;
	u32 src_dst_bidx;
	u32 link_bcntrld;
	u32 src_dst_cidx;
	u32 ccnt;
} __packed;

/* fields in edmacc_param.opt */
#define SAM		BIT(0)
#define DAM		BIT(1)
#define SYNCDIM		BIT(2)
#define STATIC		BIT(3)
#define EDMA_FWID	(0x07 << 8)
#define TCCMODE		BIT(11)
#define EDMA_TCC(t)	((t) << 12)
#define TCINTEN		BIT(20)
#define ITCINTEN	BIT(21)
#define TCCHEN		BIT(22)
#define ITCCHEN		BIT(23)

struct edma_pset {
	u32				len;
	dma_addr_t			addr;
	struct edmacc_param		param;
};

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	enum dma_transfer_direction	direction;
	int				cyclic;
	int				absync;
	int				pset_nr;
	struct edma_chan		*echan;
	int				processed;

	/*
	 * The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 * so far to cover accounting. This is updated directly to processed
	 * during edma_callback and is always <= processed, because processed
	 * refers to the number of pending transfers (programmed to the EDMA
	 * controller), whereas processed_stat tracks the number of transfers
	 * accounted for so far.
	 *
	 * - residue: The number of bytes we have left to transfer for this desc
	 *
	 * - residue_stat: The residue in bytes of data we have covered
	 * so far for accounting. This is updated directly to residue
	 * during callbacks to keep it current.
	 *
	 * - sg_len: Tracks the length of the current intermediate transfer,
	 * this is required to update the residue during intermediate transfer
	 * completion callback.
	 */
	int				processed_stat;
	u32				sg_len;
	u32				residue;
	u32				residue_stat;

	struct edma_pset		pset[0];
};
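
/*
 * Sketch of how the accounting fields above move when an intermediate
 * transfer completes (these assignments appear verbatim in
 * edma_completion_handler() below):
 *
 *	edesc->residue -= edesc->sg_len;
 *	edesc->residue_stat = edesc->residue;
 *	edesc->processed_stat = edesc->processed;
 *
 * A later residue query can then start from residue_stat/processed_stat
 * instead of rescanning the whole descriptor.
 */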

struct edma_cc;

struct edma_tc {
	struct device_node		*node;
	u16				id;
};

struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	struct edma_tc			*tc;
	int				ch_num;
	bool				alloced;
	bool				hw_triggered;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};

struct edma_cc {
	struct device			*dev;
	struct edma_soc_info		*info;
	void __iomem			*base;
	int				id;
	bool				legacy_mode;

	/* eDMA3 resource information */
	unsigned			num_channels;
	unsigned			num_qchannels;
	unsigned			num_region;
	unsigned			num_slots;
	unsigned			num_tc;
	bool				chmap_exist;
	enum dma_event_q		default_queue;

	unsigned int			ccint;
	unsigned int			ccerrint;

	/*
	 * The slot_inuse bit for each PaRAM slot is clear unless the slot
	 * is in use by Linux or has been allocated for use by the DSP.
	 */
	unsigned long *slot_inuse;

	struct dma_device		dma_slave;
	struct dma_device		*dma_memcpy;
	struct edma_chan		*slave_chans;
	struct edma_tc			*tc_list;
	int				dummy_slot;
};

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};

#define EDMA_BINDING_LEGACY	0
#define EDMA_BINDING_TPCC	1
static const struct of_device_id edma_of_ids[] = {
	{
		.compatible = "ti,edma3",
		.data = (void *)EDMA_BINDING_LEGACY,
	},
	{
		.compatible = "ti,edma3-tpcc",
		.data = (void *)EDMA_BINDING_TPCC,
	},
	{}
};

static const struct of_device_id edma_tptc_of_ids[] = {
	{ .compatible = "ti,edma3-tptc", },
	{}
};

static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
{
	return (unsigned int)__raw_readl(ecc->base + offset);
}

static inline void edma_write(struct edma_cc *ecc, int offset, int val)
{
	__raw_writel(val, ecc->base + offset);
}

static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
			       unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	val |= or;
	edma_write(ecc, offset, val);
}

static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	edma_write(ecc, offset, val);
}

static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val |= or;
	edma_write(ecc, offset, val);
}

static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
					   int i)
{
	return edma_read(ecc, offset + (i << 2));
}

static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
				    unsigned val)
{
	edma_write(ecc, offset + (i << 2), val);
}

static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
				     unsigned and, unsigned or)
{
	edma_modify(ecc, offset + (i << 2), and, or);
}

static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
				 unsigned or)
{
	edma_or(ecc, offset + (i << 2), or);
}

static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
				  unsigned or)
{
	edma_or(ecc, offset + ((i * 2 + j) << 2), or);
}

static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
				     int j, unsigned val)
{
	edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}

static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset);
}

static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
						   int offset, int i)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
}

static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
				      unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset, val);
}

static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
					    int i, unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}

static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
					   int param_no)
{
	return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
}

static inline void edma_param_write(struct edma_cc *ecc, int offset,
				    int param_no, unsigned val)
{
	edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
}

static inline void edma_param_modify(struct edma_cc *ecc, int offset,
				     int param_no, unsigned and, unsigned or)
{
	edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}

static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
				  unsigned and)
{
	edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
}

static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
				 unsigned or)
{
	edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}

static inline void set_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static inline void clear_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
}

static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
					  int priority)
{
	int bit = queue_no * 4;

	edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
}

static void edma_set_chmap(struct edma_chan *echan, int slot)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (ecc->chmap_exist) {
		slot = EDMA_CHAN_SLOT(slot);
		edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
	}
}

static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (enable) {
		edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
					 BIT(channel & 0x1f));
		edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
					 BIT(channel & 0x1f));
	} else {
		edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
					 BIT(channel & 0x1f));
	}
}

/*
 * paRAM slot management functions
 */
static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
			    const struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
}

static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
			   struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
}

/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ecc: pointer to edma_cc struct
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer. Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
static int edma_alloc_slot(struct edma_cc *ecc, int slot)
{
	if (slot >= 0) {
		slot = EDMA_CHAN_SLOT(slot);
		/* Requesting entry paRAM slot for a HW triggered channel. */
		if (ecc->chmap_exist && slot < ecc->num_channels)
			slot = EDMA_SLOT_ANY;
	}

	if (slot < 0) {
		if (ecc->chmap_exist)
			slot = 0;
		else
			slot = ecc->num_channels;
		for (;;) {
			slot = find_next_zero_bit(ecc->slot_inuse,
						  ecc->num_slots,
						  slot);
			if (slot == ecc->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, ecc->slot_inuse))
				break;
		}
	} else if (slot >= ecc->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, ecc->slot_inuse)) {
		return -EBUSY;
	}

	edma_write_slot(ecc, slot, &dummy_paramset);

	return EDMA_CTLR_CHAN(ecc->id, slot);
}
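
/*
 * Usage sketch (illustrative, not part of the driver): allocating and
 * releasing a scratch slot for linking typically looks like this:
 *
 *	int slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
 *
 *	if (slot < 0)
 *		return slot;
 *	...
 *	edma_free_slot(ecc, slot);
 *
 * A negative return is -ENOMEM (no free slot), -EINVAL (slot out of
 * range) or -EBUSY (slot already taken). Note that the returned value
 * also packs the controller id, so it must be unwrapped with
 * EDMA_CHAN_SLOT() before indexing registers.
 */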

static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;

	edma_write_slot(ecc, slot, &dummy_paramset);
	clear_bit(slot, ecc->slot_inuse);
}

/**
 * edma_link - link one parameter RAM slot to another
 * @ecc: pointer to edma_cc struct
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
{
	if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
		dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");

	from = EDMA_CHAN_SLOT(from);
	to = EDMA_CHAN_SLOT(to);
	if (from >= ecc->num_slots || to >= ecc->num_slots)
		return;

	edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
			  PARM_OFFSET(to));
}
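
/*
 * Example (sketch): edma_execute() below builds a chain by linking
 * consecutive slots and finally parking the channel on the dummy slot:
 *
 *	edma_link(ecc, echan->slot[0], echan->slot[1]);
 *	edma_link(ecc, echan->slot[1], echan->slot[2]);
 *	edma_link(ecc, echan->slot[2], ecc->dummy_slot);
 *
 * The low 16 bits of link_bcntrld hold the PaRAM byte offset of the
 * next set, which is why PARM_OFFSET(to) is written above.
 */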

/**
 * edma_get_position - returns the current transfer point
 * @ecc: pointer to edma_cc struct
 * @slot: parameter RAM slot being examined
 * @dst: true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
				    bool dst)
{
	u32 offs;

	slot = EDMA_CHAN_SLOT(slot);
	offs = PARM_OFFSET(slot);
	offs += dst ? PARM_DST : PARM_SRC;

	return edma_read(ecc, offs);
}

/*
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software. (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 */
static void edma_start(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	if (!echan->hw_triggered) {
		/* EDMA channels without event association */
		dev_dbg(ecc->dev, "ESR%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_ESR, j));
		edma_shadow0_write_array(ecc, SH_ESR, j, mask);
	} else {
		/* EDMA channel with event association */
		dev_dbg(ecc->dev, "ER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ecc, EDMA_ECR, j, mask);
		edma_write_array(ecc, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
		edma_shadow0_write_array(ecc, SH_EESR, j, mask);
		dev_dbg(ecc->dev, "EER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_EER, j));
	}
}

static void edma_stop(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ecc, SH_EECR, j, mask);
	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
	edma_write_array(ecc, EDMA_EMCR, j, mask);

	/* clear possibly pending completion interrupt */
	edma_shadow0_write_array(ecc, SH_ICR, j, mask);

	dev_dbg(ecc->dev, "EER%d %08x\n", j,
		edma_shadow0_read_array(ecc, SH_EER, j));

	/* REVISIT: consider guarding against inappropriate event
	 * chaining by overwriting with dummy_paramset.
	 */
}

/*
 * Temporarily disable EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers
 */
static void edma_pause(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
}

/* Re-enable EDMA hardware events on the specified channel.  */
static void edma_resume(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
}

static void edma_trigger_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);

	dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
		edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
}

static void edma_clean_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
	/* Clear the corresponding EMR bits */
	edma_write_array(ecc, EDMA_EMCR, j, mask);
	/* Clear any SER */
	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
	edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
}

/* Move channel to a specific event queue */
static void edma_assign_channel_eventq(struct edma_chan *echan,
				       enum dma_event_q eventq_no)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int bit = (channel & 0x7) * 4;

	/* default to low priority queue */
	if (eventq_no == EVENTQ_DEFAULT)
		eventq_no = ecc->default_queue;
	if (eventq_no >= ecc->num_tc)
		return;

	eventq_no &= 7;
	edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
			  eventq_no << bit);
}
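
/*
 * Worked example (illustrative): each DMAQNUM register holds eight
 * 3-bit queue numbers. For channel 13, the register index is
 * 13 >> 3 = 1 and the field starts at bit (13 & 7) * 4 = 20, so the
 * modify above clears bits 22:20 of DMAQNUM1 and writes the queue
 * number there.
 */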

static int edma_alloc_channel(struct edma_chan *echan,
			      enum dma_event_q eventq_no)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	/* ensure access through shadow region 0 */
	edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(echan);

	edma_setup_interrupt(echan, true);

	edma_assign_channel_eventq(echan, eventq_no);

	return 0;
}

static void edma_free_channel(struct edma_chan *echan)
{
	/* ensure no events are pending */
	edma_stop(echan);
	/* REVISIT should probably take out of shadow region 0 */
	edma_setup_interrupt(echan, false);
}

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	if (!echan->edesc) {
		/* Setup is needed for the first transfer */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc)
			return;
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
	edesc->sg_len = 0;

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 " chnum\t%d\n"
			 " slot\t%d\n"
			 " opt\t%08x\n"
			 " src\t%08x\n"
			 " dst\t%08x\n"
			 " abcnt\t%08x\n"
			 " ccnt\t%08x\n"
			 " bidx\t%08x\n"
			 " cidx\t%08x\n"
			 " lkrld\t%08x\n",
			 j, echan->ch_num, echan->slot[i],
			 edesc->pset[j].param.opt,
			 edesc->pset[j].param.src,
			 edesc->pset[j].param.dst,
			 edesc->pset[j].param.a_b_cnt,
			 edesc->pset[j].param.ccnt,
			 edesc->pset[j].param.src_dst_bidx,
			 edesc->pset[j].param.src_dst_cidx,
			 edesc->pset[j].param.link_bcntrld);
		/* Link to the next slot if this is not the last set */
		if (i != (nslots - 1))
			edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a series of SG-list transactions, set
	 * up a link to the dummy slot. All future events are then absorbed,
	 * which is fine because we're done.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
		else
			edma_link(ecc, echan->slot[nslots - 1],
				  echan->ecc->dummy_slot);
	}

	if (echan->missed) {
		/*
		 * This happens due to setup times between intermediate
		 * transfers in long SG lists which have to be broken up into
		 * transfers of at most MAX_NR_SG elements.
		 */
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(echan);
		edma_stop(echan);
		edma_start(echan);
		edma_trigger_channel(echan);
		echan->missed = 0;
	} else if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(echan);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(echan);
	}
}
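
/*
 * Example (sketch): with MAX_NR_SG = 20, a 45-element SG list is
 * dispatched by edma_execute() in three rounds of 20, 20 and 5 PaRAM
 * sets; edesc->processed advances 20 -> 40 -> 45, and only the final
 * round links the last slot to the dummy slot.
 */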

static int edma_terminate_all(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_terminate_all() returns (even if it does, it will
	 * see echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		edma_stop(echan);
		/* Move the cyclic channel back to default queue */
		if (!echan->tc && echan->edesc->cyclic)
			edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
		/*
		 * free the running request descriptor
		 * since it is not in any of the vdesc lists
		 */
		edma_desc_free(&echan->edesc->vdesc);
		echan->edesc = NULL;
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}

static void edma_synchronize(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	vchan_synchronize(&echan->vchan);
}

static int edma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
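
/*
 * Client-side usage sketch (not part of this driver): a peripheral
 * driver configures the channel before preparing transfers. The
 * address below ("dev_fifo") is a hypothetical device register:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = dev_fifo,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * Note that edma_slave_config() above rejects 8-byte bus widths.
 */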

static int edma_dma_pause(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (!echan->edesc)
		return -EINVAL;

	edma_pause(echan);
	return 0;
}

static int edma_dma_resume(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	edma_resume(echan);
	return 0;
}

/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and setup.
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of dev_width, how much to send
 * @acnt: Width of the device access (dev_width), used as the A count
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
			    dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
			    unsigned int acnt, unsigned int dma_length,
			    enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of:
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: In A-sync transfer only, bcntrld is used, but it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case, the approach adopted is: bcnt for the
		 * first frame will be the remainder below. Then for
		 * every successive frame, bcnt will be SZ_64K - 1. This
		 * is ensured since bcntrld is set to 0xffff at the end
		 * of this function.
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K - 1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	epset->len = dma_length;

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
		epset->addr = src_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = dst_addr;
	} else if (direction == DMA_MEM_TO_MEM) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	param->ccnt = ccnt;
	/*
	 * The only time (bcntrld) auto reload is required is the A-sync
	 * case, and there a reload value of SZ_64K - 1 is all that is
	 * needed. The link part is initialized here to 0xffff (no link)
	 * and is populated later by edma_execute().
	 */
	param->link_bcntrld = 0xffffffff;
	return absync;
}
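
/*
 * Worked examples (illustrative numbers):
 *
 * A-sync: acnt = 4 (32-bit FIFO), burst = 1, dma_length = 96000 gives
 * 24000 elements; 24000 / (SZ_64K - 1) = 0 with remainder 24000, so
 * bcnt = 24000 and ccnt is bumped to 1.
 *
 * AB-sync: acnt = 4, burst = 8, dma_length = 65536 gives bcnt = 8 and
 * ccnt = 65536 / 32 = 2048 frames of 32 bytes each.
 */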

static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc)
		return NULL;

	edesc->pset_nr = sg_len;
	edesc->residue = 0;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		if (i == sg_len - 1)
			/* Enable completion interrupt */
			edesc->pset[i].param.opt |= TCINTEN;
		else if (!((i+1) % MAX_NR_SG))
			/*
			 * Enable early completion interrupt for the
			 * intermediate set. In this case the driver will be
			 * notified when the paRAM set is submitted to TC.
			 * This will allow more time to set up the next set
			 * of slots.
			 */
			edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
	}
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret, nslots;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned int width, pset_len;

	if (unlikely(!echan || !len))
		return NULL;

	if (len < SZ_64K) {
		/*
		 * Transfer sizes of less than 64K can be handled with one
		 * paRAM slot and with one burst.
		 * ACNT = length
		 */
		width = len;
		pset_len = len;
		nslots = 1;
	} else {
		/*
		 * Transfer sizes bigger than 64K will be handled with a
		 * maximum of two paRAM slots.
		 * slot1: (full_length / 32767) times 32767-byte bursts.
		 * ACNT = 32767, length1: (full_length / 32767) * 32767
		 * slot2: the remaining amount of data after slot1.
		 * ACNT = full_length - length1, length2 = ACNT
		 *
		 * When full_length is a multiple of 32767, one slot can be
		 * used to complete the transfer.
		 */
		width = SZ_32K - 1;
		pset_len = rounddown(len, width);
		/* One slot is enough for lengths multiple of (SZ_32K - 1) */
		if (unlikely(pset_len == len))
			nslots = 1;
		else
			nslots = 2;
	}

	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc)
		return NULL;

	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = len;
	edesc->direction = DMA_MEM_TO_MEM;
	edesc->echan = echan;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       width, pset_len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	edesc->pset[0].param.opt |= ITCCHEN;
	if (nslots == 1) {
		/* Enable transfer complete interrupt */
		edesc->pset[0].param.opt |= TCINTEN;
	} else {
		/* Enable transfer complete chaining for the first slot */
		edesc->pset[0].param.opt |= TCCHEN;

		if (echan->slot[1] < 0) {
			echan->slot[1] = edma_alloc_slot(echan->ecc,
							 EDMA_SLOT_ANY);
			if (echan->slot[1] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
		dest += pset_len;
		src += pset_len;
		pset_len = width = len % (SZ_32K - 1);

		ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
				       width, pset_len, DMA_MEM_TO_MEM);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->pset[1].param.opt |= ITCCHEN;
		edesc->pset[1].param.opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
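
/*
 * Worked example (illustrative): for len = 100000 bytes,
 * width = SZ_32K - 1 = 32767 and pset_len = rounddown(100000, 32767) =
 * 98301, so slot 0 moves three 32767-byte units and slot 1, chained
 * from slot 0 via TCCHEN, moves the remaining 1699 bytes
 * (100000 % 32767) and raises the completion interrupt.
 */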

static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	bool use_intermediate = false;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of the period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG) {
		/*
		 * If the burst and period sizes are the same, we can put
		 * the full buffer into a single period and activate
		 * intermediate interrupts. This will produce interrupts
		 * after each burst, which is also after each desired period.
		 */
		if (burst == period_len) {
			period_len = buf_len;
			nslots = 2;
			use_intermediate = true;
		} else {
			return NULL;
		}
	}

	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc)
		return NULL;

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = buf_len;
	edesc->direction = direction;
	edesc->echan = echan;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 " chnum\t%d\n"
			 " slot\t%d\n"
			 " opt\t%08x\n"
			 " src\t%08x\n"
			 " dst\t%08x\n"
			 " abcnt\t%08x\n"
			 " ccnt\t%08x\n"
			 " bidx\t%08x\n"
			 " cidx\t%08x\n"
			 " lkrld\t%08x\n",
			 i, echan->ch_num, echan->slot[i],
			 edesc->pset[i].param.opt,
			 edesc->pset[i].param.src,
			 edesc->pset[i].param.dst,
			 edesc->pset[i].param.a_b_cnt,
			 edesc->pset[i].param.ccnt,
			 edesc->pset[i].param.src_dst_bidx,
			 edesc->pset[i].param.src_dst_cidx,
			 edesc->pset[i].param.link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable period interrupt only if it is requested
		 */
		if (tx_flags & DMA_PREP_INTERRUPT) {
			edesc->pset[i].param.opt |= TCINTEN;

			/* Also enable intermediate interrupts if necessary */
			if (use_intermediate)
				edesc->pset[i].param.opt |= ITCINTEN;
		}
	}

	/* Place the cyclic channel in the highest priority queue */
	if (!echan->tc)
		edma_assign_channel_eventq(echan, EVENTQ_0);

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
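
/*
 * Example (sketch): an audio user with buf_len = 64000 and
 * period_len = 4000 needs nslots = 64000 / 4000 + 1 = 17 PaRAM sets,
 * which is why MAX_NR_SG defaults to 20 above. The last set is a copy
 * of pset[0] and edma_execute() links it back to slot[1], so the ring
 * closes on itself and runs until terminated.
 */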

static void edma_completion_handler(struct edma_chan *echan)
{
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;

	spin_lock(&echan->vchan.lock);
	edesc = echan->edesc;
	if (edesc) {
		if (edesc->cyclic) {
			vchan_cyclic_callback(&edesc->vdesc);
			spin_unlock(&echan->vchan.lock);
			return;
		} else if (edesc->processed == edesc->pset_nr) {
			edesc->residue = 0;
			edma_stop(echan);
			vchan_cookie_complete(&edesc->vdesc);
			echan->edesc = NULL;

			dev_dbg(dev, "Transfer completed on channel %d\n",
				echan->ch_num);
		} else {
			dev_dbg(dev, "Sub transfer completed on channel %d\n",
				echan->ch_num);

			edma_pause(echan);

			/* Update statistics for tx_status */
			edesc->residue -= edesc->sg_len;
			edesc->residue_stat = edesc->residue;
			edesc->processed_stat = edesc->processed;
		}
		edma_execute(echan);
	}

	spin_unlock(&echan->vchan.lock);
}

/* eDMA interrupt handler */
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int ctlr;
	u32 sh_ier;
	u32 sh_ipr;
	u32 bank;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot;
		u32 channel;

		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
			edma_completion_handler(&ecc->slave_chans[channel]);
		}
	} while (sh_ipr);

	edma_shadow0_write(ecc, SH_IEVAL, 1);
	return IRQ_HANDLED;
}
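
/*
 * Example (illustrative): IPR and IPRH cover 64 channels as two 32-bit
 * banks. A pending bit 3 in bank 1 decodes to channel
 * (1 << 5) | 3 = 35, whose IPR bit is cleared via SH_ICR before the
 * completion handler for slave_chans[35] runs.
 */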
  1276. static void edma_error_handler(struct edma_chan *echan)
  1277. {
  1278. struct edma_cc *ecc = echan->ecc;
  1279. struct device *dev = echan->vchan.chan.device->dev;
  1280. struct edmacc_param p;
  1281. if (!echan->edesc)
  1282. return;
  1283. spin_lock(&echan->vchan.lock);
  1284. edma_read_slot(ecc, echan->slot[0], &p);
  1285. /*
  1286. * Issue later based on missed flag which will be sure
  1287. * to happen as:
  1288. * (1) we finished transmitting an intermediate slot and
  1289. * edma_execute is coming up.
  1290. * (2) or we finished current transfer and issue will
  1291. * call edma_execute.
  1292. *
  1293. * Important note: issuing can be dangerous here and
  1294. * lead to some nasty recursion when we are in a NULL
  1295. * slot. So we avoid doing so and set the missed flag.
  1296. */
  1297. if (p.a_b_cnt == 0 && p.ccnt == 0) {
  1298. dev_dbg(dev, "Error on null slot, setting miss\n");
  1299. echan->missed = 1;
  1300. } else {
  1301. /*
  1302. * The slot is already programmed but the event got
  1303. * missed, so its safe to issue it here.
  1304. */
  1305. dev_dbg(dev, "Missed event, TRIGGERING\n");
  1306. edma_clean_channel(echan);
  1307. edma_stop(echan);
  1308. edma_start(echan);
  1309. edma_trigger_channel(echan);
  1310. }
  1311. spin_unlock(&echan->vchan.lock);
  1312. }
static inline bool edma_error_pending(struct edma_cc *ecc)
{
	if (edma_read_array(ecc, EDMA_EMR, 0) ||
	    edma_read_array(ecc, EDMA_EMR, 1) ||
	    edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
		return true;

	return false;
}
/* eDMA error interrupt handler */
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int i, j;
	int ctlr;
	unsigned int cnt = 0;
	unsigned int val;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_ccerr_handler\n");

	if (!edma_error_pending(ecc)) {
		/*
		 * The registers indicate no pending error event but the irq
		 * handler has been called.
		 * Ask eDMA to re-evaluate the error registers.
		 */
		dev_err(ecc->dev, "%s: Error interrupt without error event!\n",
			__func__);
		edma_write(ecc, EDMA_EEVAL, 1);
		return IRQ_NONE;
	}

	while (1) {
		/* Event missed register(s) */
		for (j = 0; j < 2; j++) {
			unsigned long emr;

			val = edma_read_array(ecc, EDMA_EMR, j);
			if (!val)
				continue;

			dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
			emr = val;
			for (i = find_next_bit(&emr, 32, 0); i < 32;
			     i = find_next_bit(&emr, 32, i + 1)) {
				int k = (j << 5) + i;

				/* Clear the corresponding EMR bits */
				edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
				/* Clear any SER */
				edma_shadow0_write_array(ecc, SH_SECR, j,
							 BIT(i));
				edma_error_handler(&ecc->slave_chans[k]);
			}
		}

		val = edma_read(ecc, EDMA_QEMR);
		if (val) {
			dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_QEMCR, val);
			edma_shadow0_write(ecc, SH_QSECR, val);
		}

		val = edma_read(ecc, EDMA_CCERR);
		if (val) {
			dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_CCERRCLR, val);
		}

		if (!edma_error_pending(ecc))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ecc, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = ecc->dev;
	enum dma_event_q eventq_no = EVENTQ_DEFAULT;
	int ret;

	if (echan->tc) {
		eventq_no = echan->tc->id;
	} else if (ecc->tc_list) {
		/* memcpy channel */
		echan->tc = &ecc->tc_list[ecc->info->default_queue];
		eventq_no = echan->tc->id;
	}

	ret = edma_alloc_channel(echan, eventq_no);
	if (ret)
		return ret;

	echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num);
	if (echan->slot[0] < 0) {
		dev_err(dev, "Entry slot allocation failed for channel %u\n",
			EDMA_CHAN_SLOT(echan->ch_num));
		/* ret would still be 0 here; propagate the slot error */
		ret = echan->slot[0];
		goto err_slot;
	}

	/* Set up channel -> slot mapping for the entry slot */
	edma_set_chmap(echan, echan->slot[0]);
	echan->alloced = true;

	dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
		echan->hw_triggered ? "HW" : "SW");

	return 0;

err_slot:
	edma_free_channel(echan);
	return ret;
}
/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = echan->ecc->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 0; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->ecc, echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Set entry slot to the dummy slot */
	edma_set_chmap(echan, echan->ecc->dummy_slot);

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan);
		echan->alloced = false;
	}

	echan->tc = NULL;
	echan->hw_triggered = false;

	dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n",
		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id);
}
/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
/*
 * This limit exists to avoid a possible infinite loop when waiting for
 * proof that a particular transfer is completed. This limit can be hit
 * if there are large bursts to/from slow devices or if the CPU is never
 * able to catch the DMA hardware idle. On an AM335x transferring 48
 * bytes from the UART RX-FIFO, as many as 55 loops have been seen.
 */
#define EDMA_MAX_TR_WAIT_LOOPS 1000
static u32 edma_residue(struct edma_desc *edesc)
{
	bool dst = edesc->direction == DMA_DEV_TO_MEM;
	int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
	struct edma_chan *echan = edesc->echan;
	struct edma_pset *pset = edesc->pset;
	dma_addr_t done, pos;
	int i;

	/*
	 * We always read the dst/src position from the first PaRAM
	 * pset. That's the one which is active now.
	 */
	pos = edma_get_position(echan->ecc, echan->slot[0], dst);

	/*
	 * "pos" may represent a transfer request that is still being
	 * processed by the EDMACC or EDMATC. We will busy wait until
	 * any one of the situations occurs:
	 *   1. the DMA hardware is idle
	 *   2. a new transfer request is setup
	 *   3. we hit the loop limit
	 */
	while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
		/* check if a new transfer request is setup */
		if (edma_get_position(echan->ecc,
				      echan->slot[0], dst) != pos) {
			break;
		}

		if (!--loop_count) {
			dev_dbg_ratelimited(echan->vchan.chan.device->dev,
					    "%s: timeout waiting for PaRAM update\n",
					    __func__);
			break;
		}

		cpu_relax();
	}

	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
	 *
	 * We never update edesc->residue in the cyclic case, so we
	 * can tell the remaining room to the end of the circular
	 * buffer.
	 */
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;
	}

	/*
	 * For SG operation we catch up with the last processed
	 * status.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;
	}
	return edesc->residue_stat;
}
/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
		txstate->residue = edma_residue(echan->edesc);
	else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}
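/* The memcpy channel list is a -1 terminated array; scan it for ch_num */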
static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
{
	if (!memcpy_channels)
		return false;
	while (*memcpy_channels != -1) {
		if (*memcpy_channels == ch_num)
			return true;
		memcpy_channels++;
	}
	return false;
}
#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
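/*
 * Set up the slave dma_device (and, when memcpy channels are specified,
 * a second dma_device for DMA_MEMCPY) and initialize every channel's
 * virt-dma bookkeeping.
 */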
static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
{
	struct dma_device *s_ddev = &ecc->dma_slave;
	struct dma_device *m_ddev = NULL;
	s32 *memcpy_channels = ecc->info->memcpy_channels;
	int i, j;

	dma_cap_zero(s_ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
	dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
	if (ecc->legacy_mode && !memcpy_channels) {
		dev_warn(ecc->dev,
			 "Legacy memcpy is enabled, things might not work\n");

		dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
		s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
		s_ddev->directions = BIT(DMA_MEM_TO_MEM);
	}

	s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
	s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
	s_ddev->device_free_chan_resources = edma_free_chan_resources;
	s_ddev->device_issue_pending = edma_issue_pending;
	s_ddev->device_tx_status = edma_tx_status;
	s_ddev->device_config = edma_slave_config;
	s_ddev->device_pause = edma_dma_pause;
	s_ddev->device_resume = edma_dma_resume;
	s_ddev->device_terminate_all = edma_terminate_all;
	s_ddev->device_synchronize = edma_synchronize;

	s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
	s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
	s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	s_ddev->dev = ecc->dev;
	INIT_LIST_HEAD(&s_ddev->channels);

	if (memcpy_channels) {
		m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
		/* Fall back to slave-only operation if the alloc fails */
		if (!m_ddev) {
			dev_warn(ecc->dev, "memcpy is disabled due to OoM\n");
			memcpy_channels = NULL;
		}
	}

	if (m_ddev) {
		ecc->dma_memcpy = m_ddev;

		dma_cap_zero(m_ddev->cap_mask);
		dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);

		m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
		m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
		m_ddev->device_free_chan_resources = edma_free_chan_resources;
		m_ddev->device_issue_pending = edma_issue_pending;
		m_ddev->device_tx_status = edma_tx_status;
		m_ddev->device_config = edma_slave_config;
		m_ddev->device_pause = edma_dma_pause;
		m_ddev->device_resume = edma_dma_resume;
		m_ddev->device_terminate_all = edma_terminate_all;
		m_ddev->device_synchronize = edma_synchronize;

		m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
		m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
		m_ddev->directions = BIT(DMA_MEM_TO_MEM);
		m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

		m_ddev->dev = ecc->dev;
		INIT_LIST_HEAD(&m_ddev->channels);
	} else if (!ecc->legacy_mode) {
		dev_info(ecc->dev, "memcpy is disabled\n");
	}

	for (i = 0; i < ecc->num_channels; i++) {
		struct edma_chan *echan = &ecc->slave_chans[i];

		echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
			vchan_init(&echan->vchan, m_ddev);
		else
			vchan_init(&echan->vchan, s_ddev);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}
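/*
 * Decode the eDMA3 Channel Controller capabilities from the CCCFG
 * register and, when the platform data does not provide one, build a
 * default TC/queue priority mapping (Q0 highest ... last queue lowest).
 */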
static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
			      struct edma_cc *ecc)
{
	int i;
	u32 value, cccfg;
	s8 (*queue_priority_map)[2];

	/* Decode the eDMA3 configuration from CCCFG register */
	cccfg = edma_read(ecc, EDMA_CCCFG);

	value = GET_NUM_REGN(cccfg);
	ecc->num_region = BIT(value);

	value = GET_NUM_DMACH(cccfg);
	ecc->num_channels = BIT(value + 1);

	value = GET_NUM_QDMACH(cccfg);
	ecc->num_qchannels = value * 2;

	value = GET_NUM_PAENTRY(cccfg);
	ecc->num_slots = BIT(value + 4);

	value = GET_NUM_EVQUE(cccfg);
	ecc->num_tc = value + 1;

	ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;

	dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
	dev_dbg(dev, "num_region: %u\n", ecc->num_region);
	dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
	dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
	dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
	dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
	dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");

	/* Nothing needs to be done if a queue priority mapping is provided */
	if (pdata->queue_priority_mapping)
		return 0;

	/*
	 * Configure TC/queue priority as follows:
	 * Q0 - priority 0
	 * Q1 - priority 1
	 * Q2 - priority 2
	 * ...
	 * The meaning of priority numbers: 0 highest priority, 7 lowest
	 * priority. So Q0 is the highest priority queue and the last queue has
	 * the lowest priority.
	 */
	/* Each entry is an s8[2] pair, so size by the element, not by s8 */
	queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
					  sizeof(*queue_priority_map),
					  GFP_KERNEL);
	if (!queue_priority_map)
		return -ENOMEM;

	for (i = 0; i < ecc->num_tc; i++) {
		queue_priority_map[i][0] = i;
		queue_priority_map[i][1] = i;
	}
	queue_priority_map[i][0] = -1;
	queue_priority_map[i][1] = -1;

	pdata->queue_priority_mapping = queue_priority_map;
	/* Default queue has the lowest priority */
	pdata->default_queue = i - 1;

	return 0;
}
#if IS_ENABLED(CONFIG_OF)
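/*
 * Program the DMA event crossbar from the "ti,edma-xbar-event-map"
 * property: each pair selects a byte lane in the mux registers (found
 * in the node's second memory resource) and the event number to route
 * through it.
 */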
static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
			       size_t sz)
{
	const char pname[] = "ti,edma-xbar-event-map";
	struct resource res;
	void __iomem *xbar;
	s16 (*xbar_chans)[2];
	size_t nelm = sz / sizeof(s16);
	u32 shift, offset, mux;
	int ret, i;

	xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
	if (!xbar_chans)
		return -ENOMEM;

	ret = of_address_to_resource(dev->of_node, 1, &res);
	if (ret)
		return -ENOMEM;

	xbar = devm_ioremap(dev, res.start, resource_size(&res));
	if (!xbar)
		return -ENOMEM;

	ret = of_property_read_u16_array(dev->of_node, pname,
					 (u16 *)xbar_chans, nelm);
	if (ret)
		return -EIO;

	/* Invalidate last entry for the other user of this mess */
	nelm >>= 1;
	xbar_chans[nelm][0] = -1;
	xbar_chans[nelm][1] = -1;

	for (i = 0; i < nelm; i++) {
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
	}

	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;

	return 0;
}
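/*
 * Build an edma_soc_info from the device tree.  In legacy (non-TPCC)
 * mode only the crossbar map is parsed; otherwise the optional memcpy
 * channel list and reserved slot ranges are read as well.
 */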
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     bool legacy_mode)
{
	struct edma_soc_info *info;
	struct property *prop;
	size_t sz;
	int ret;

	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	if (legacy_mode) {
		prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
					&sz);
		if (prop) {
			ret = edma_xbar_event_map(dev, info, sz);
			if (ret)
				return ERR_PTR(ret);
		}
		return info;
	}

	/* Get the list of channels allocated to be used for memcpy */
	prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
	if (prop) {
		const char pname[] = "ti,edma-memcpy-channels";
		size_t nelm = sz / sizeof(s32);
		s32 *memcpy_ch;

		memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
					 GFP_KERNEL);
		if (!memcpy_ch)
			return ERR_PTR(-ENOMEM);

		ret = of_property_read_u32_array(dev->of_node, pname,
						 (u32 *)memcpy_ch, nelm);
		if (ret)
			return ERR_PTR(ret);

		memcpy_ch[nelm] = -1;
		info->memcpy_channels = memcpy_ch;
	}

	prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
				&sz);
	if (prop) {
		const char pname[] = "ti,edma-reserved-slot-ranges";
		u32 (*tmp)[2];
		s16 (*rsv_slots)[2];
		size_t nelm = sz / sizeof(*tmp);
		struct edma_rsv_info *rsv_info;
		int i;

		if (!nelm)
			return info;

		tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
		if (!tmp)
			return ERR_PTR(-ENOMEM);

		rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
		if (!rsv_info) {
			kfree(tmp);
			return ERR_PTR(-ENOMEM);
		}

		rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
					 GFP_KERNEL);
		if (!rsv_slots) {
			kfree(tmp);
			return ERR_PTR(-ENOMEM);
		}

		ret = of_property_read_u32_array(dev->of_node, pname,
						 (u32 *)tmp, nelm * 2);
		if (ret) {
			kfree(tmp);
			return ERR_PTR(ret);
		}

		for (i = 0; i < nelm; i++) {
			rsv_slots[i][0] = tmp[i][0];
			rsv_slots[i][1] = tmp[i][1];
		}
		rsv_slots[nelm][0] = -1;
		rsv_slots[nelm][1] = -1;

		info->rsv = rsv_info;
		info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;

		kfree(tmp);
	}

	return info;
}
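/*
 * #dma-cells translation: args[0] selects the eDMA channel; in TPCC
 * mode an optional args[1] selects the transfer controller (TC) the
 * channel should use.
 */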
static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct edma_cc *ecc = ofdma->of_dma_data;
	struct dma_chan *chan = NULL;
	struct edma_chan *echan;
	int i;

	if (!ecc || dma_spec->args_count < 1)
		return NULL;

	for (i = 0; i < ecc->num_channels; i++) {
		echan = &ecc->slave_chans[i];
		if (echan->ch_num == dma_spec->args[0]) {
			chan = &echan->vchan.chan;
			break;
		}
	}

	if (!chan)
		return NULL;

	if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
		goto out;

	if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
	    dma_spec->args[1] < echan->ecc->num_tc) {
		echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
		goto out;
	}

	return NULL;
out:
	/* The channel is going to be used as HW synchronized */
	echan->hw_triggered = true;

	return dma_get_slave_channel(chan);
}
#else
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     bool legacy_mode)
{
	return ERR_PTR(-EINVAL);
}

static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	return NULL;
}
#endif
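/*
 * Probe: gather configuration (platform data or DT), map the CC
 * registers, decode the hardware capabilities, request the completion
 * and error interrupts, set up queue/TC priorities and register the
 * dmaengine device(s).
 */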
static int edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info *info = pdev->dev.platform_data;
	s8 (*queue_priority_mapping)[2];
	int i, off, ln;
	const s16 (*rsv_slots)[2];
	const s16 (*xbar_chans)[2];
	int irq;
	char *irq_name;
	struct resource *mem;
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc;
	bool legacy_mode = true;
	int ret;

	if (node) {
		const struct of_device_id *match;

		match = of_match_node(edma_of_ids, node);
		/* go through uintptr_t so the cast is valid on 64-bit too */
		if (match && (u32)(uintptr_t)match->data == EDMA_BINDING_TPCC)
			legacy_mode = false;

		info = edma_setup_info_from_dt(dev, legacy_mode);
		if (IS_ERR(info)) {
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(info);
		}
	}

	if (!info)
		return -ENODEV;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc)
		return -ENOMEM;

	ecc->dev = dev;
	ecc->id = pdev->id;
	ecc->legacy_mode = legacy_mode;
	/* When booting with DT the pdev->id is -1 */
	if (ecc->id < 0)
		ecc->id = 0;

	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
	if (!mem) {
		dev_dbg(dev, "mem resource not found, using index 0\n");
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!mem) {
			dev_err(dev, "no mem resource?\n");
			return -ENODEV;
		}
	}
	ecc->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ecc->base))
		return PTR_ERR(ecc->base);

	platform_set_drvdata(pdev, ecc);

	/* Get eDMA3 configuration from IP */
	ret = edma_setup_from_hw(dev, info, ecc);
	if (ret)
		return ret;

	/* Allocate memory based on the information we got from the IP */
	ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
					sizeof(*ecc->slave_chans), GFP_KERNEL);
	if (!ecc->slave_chans)
		return -ENOMEM;

	ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->slot_inuse)
		return -ENOMEM;

	ecc->default_queue = info->default_queue;

	for (i = 0; i < ecc->num_slots; i++)
		edma_write_slot(ecc, i, &dummy_paramset);

	if (info->rsv) {
		/* Set the reserved slots in inuse list */
		rsv_slots = info->rsv->rsv_slots;
		if (rsv_slots) {
			for (i = 0; rsv_slots[i][0] != -1; i++) {
				off = rsv_slots[i][0];
				ln = rsv_slots[i][1];
				set_bits(off, ln, ecc->slot_inuse);
			}
		}
	}

	/* Clear the xbar mapped channels in unused list */
	xbar_chans = info->xbar_chans;
	if (xbar_chans) {
		for (i = 0; xbar_chans[i][1] != -1; i++) {
			off = xbar_chans[i][1];
		}
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 0);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
			return ret;
		}
		ecc->ccint = irq;
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 2);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0,
				       irq_name, ecc);
		if (ret) {
			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
			return ret;
		}
		ecc->ccerrint = irq;
	}

	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(dev, "Can't allocate PaRAM dummy slot\n");
		return ecc->dummy_slot;
	}

	queue_priority_mapping = info->queue_priority_mapping;

	if (!ecc->legacy_mode) {
		int lowest_priority = 0;
		struct of_phandle_args tc_args;

		ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
					    sizeof(*ecc->tc_list), GFP_KERNEL);
		if (!ecc->tc_list)
			return -ENOMEM;

		for (i = 0;; i++) {
			ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
							       1, i, &tc_args);
			if (ret || i == ecc->num_tc)
				break;

			ecc->tc_list[i].node = tc_args.np;
			ecc->tc_list[i].id = i;
			queue_priority_mapping[i][1] = tc_args.args[0];
			if (queue_priority_mapping[i][1] > lowest_priority) {
				lowest_priority = queue_priority_mapping[i][1];
				info->default_queue = i;
			}
		}
	}

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	for (i = 0; i < ecc->num_region; i++) {
		edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
		edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
		edma_write_array(ecc, EDMA_QRAE, i, 0x0);
	}
	ecc->info = info;

	/* Init the dma device and channels */
	edma_dma_init(ecc, legacy_mode);

	for (i = 0; i < ecc->num_channels; i++) {
		/* Assign all channels to the default queue */
		edma_assign_channel_eventq(&ecc->slave_chans[i],
					   info->default_queue);
		/* Set entry slot to the dummy slot */
		edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
	}

	ecc->dma_slave.filter.map = info->slave_map;
	ecc->dma_slave.filter.mapcnt = info->slavecnt;
	ecc->dma_slave.filter.fn = edma_filter_fn;

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret) {
		dev_err(dev, "slave ddev registration failed (%d)\n", ret);
		goto err_reg1;
	}

	if (ecc->dma_memcpy) {
		ret = dma_async_device_register(ecc->dma_memcpy);
		if (ret) {
			dev_err(dev, "memcpy ddev registration failed (%d)\n",
				ret);
			dma_async_device_unregister(&ecc->dma_slave);
			goto err_reg1;
		}
	}

	if (node)
		of_dma_controller_register(node, of_edma_xlate, ecc);

	dev_info(dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc, ecc->dummy_slot);
	return ret;
}
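/* Remove the channels from the dma_device list and stop their tasklets */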
static void edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct edma_chan *echan, *_echan;

	list_for_each_entry_safe(echan, _echan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&echan->vchan.chan.device_node);
		tasklet_kill(&echan->vchan.task);
	}
}
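/*
 * Tear down in reverse order of probe: free the IRQs first so no
 * handler can run while the channels are being removed, then
 * unregister from the OF DMA framework and the dmaengine core.
 */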
static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	devm_free_irq(dev, ecc->ccint, ecc);
	devm_free_irq(dev, ecc->ccerrint, ecc);

	edma_cleanup_vchan(&ecc->dma_slave);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&ecc->dma_slave);
	if (ecc->dma_memcpy)
		dma_async_device_unregister(ecc->dma_memcpy);
	edma_free_slot(ecc, ecc->dummy_slot);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
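/* Mask completion interrupts on every allocated channel before sleep */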
static int edma_pm_suspend(struct device *dev)
{
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct edma_chan *echan = ecc->slave_chans;
	int i;

	for (i = 0; i < ecc->num_channels; i++) {
		if (echan[i].alloced)
			edma_setup_interrupt(&echan[i], false);
	}

	return 0;
}
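/*
 * Restore the context lost across suspend: queue priorities, shadow
 * region access, per-channel interrupts and channel->slot mappings.
 */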
static int edma_pm_resume(struct device *dev)
{
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct edma_chan *echan = ecc->slave_chans;
	int i;
	s8 (*queue_priority_mapping)[2];

	queue_priority_mapping = ecc->info->queue_priority_mapping;

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	for (i = 0; i < ecc->num_channels; i++) {
		if (echan[i].alloced) {
			/* ensure access through shadow region 0 */
			edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
				       BIT(i & 0x1f));

			edma_setup_interrupt(&echan[i], true);

			/* Set up channel -> slot mapping for the entry slot */
			edma_set_chmap(&echan[i], echan[i].slot[0]);
		}
	}

	return 0;
}
#endif
static const struct dev_pm_ops edma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
};

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma",
		.pm	= &edma_pm_ops,
		.of_match_table = edma_of_ids,
	},
};
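/*
 * The eDMA3 transfer controllers (TPTC) need no driver logic of their
 * own; binding this minimal driver keeps them powered via runtime PM.
 */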
static int edma_tptc_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	return pm_runtime_get_sync(&pdev->dev);
}

static struct platform_driver edma_tptc_driver = {
	.probe		= edma_tptc_probe,
	.driver = {
		.name	= "edma3-tptc",
		.of_match_table = edma_tptc_of_ids,
	},
};
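/*
 * dmaengine filter callback for the legacy channel request API: match
 * when the requested channel number equals the channel's ch_num and
 * mark the channel as hardware triggered.
 */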
bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	bool match = false;

	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;

		if (ch_req == echan->ch_num) {
			/* The channel is going to be used as HW synchronized */
			echan->hw_triggered = true;
			match = true;
		}
	}
	return match;
}
EXPORT_SYMBOL(edma_filter_fn);
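/*
 * Register the TPTC driver before the CC driver, and do both at
 * subsys_initcall time so the DMA engine is available before the
 * slave device drivers start probing.
 */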
static int edma_init(void)
{
	int ret;

	ret = platform_driver_register(&edma_tptc_driver);
	if (ret)
		return ret;

	return platform_driver_register(&edma_driver);
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_driver_unregister(&edma_driver);
	platform_driver_unregister(&edma_tptc_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");