owl-dma.c

// SPDX-License-Identifier: GPL-2.0+
//
// Actions Semi Owl SoCs DMA driver
//
// Copyright (c) 2014 Actions Semi Inc.
// Author: David Liu <liuwei@actions-semi.com>
//
// Copyright (c) 2018 Linaro Ltd.
// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define OWL_DMA_FRAME_MAX_LENGTH 0xfffff

/* Global DMA Controller Registers */
#define OWL_DMA_IRQ_PD0 0x00
#define OWL_DMA_IRQ_PD1 0x04
#define OWL_DMA_IRQ_PD2 0x08
#define OWL_DMA_IRQ_PD3 0x0C
#define OWL_DMA_IRQ_EN0 0x10
#define OWL_DMA_IRQ_EN1 0x14
#define OWL_DMA_IRQ_EN2 0x18
#define OWL_DMA_IRQ_EN3 0x1C
#define OWL_DMA_SECURE_ACCESS_CTL 0x20
#define OWL_DMA_NIC_QOS 0x24
#define OWL_DMA_DBGSEL 0x28
#define OWL_DMA_IDLE_STAT 0x2C

/* Channel Registers */
#define OWL_DMA_CHAN_BASE(i) (0x100 + (i) * 0x100)
#define OWL_DMAX_MODE 0x00
#define OWL_DMAX_SOURCE 0x04
#define OWL_DMAX_DESTINATION 0x08
#define OWL_DMAX_FRAME_LEN 0x0C
#define OWL_DMAX_FRAME_CNT 0x10
#define OWL_DMAX_REMAIN_FRAME_CNT 0x14
#define OWL_DMAX_REMAIN_CNT 0x18
#define OWL_DMAX_SOURCE_STRIDE 0x1C
#define OWL_DMAX_DESTINATION_STRIDE 0x20
#define OWL_DMAX_START 0x24
#define OWL_DMAX_PAUSE 0x28
#define OWL_DMAX_CHAINED_CTL 0x2C
#define OWL_DMAX_CONSTANT 0x30
#define OWL_DMAX_LINKLIST_CTL 0x34
#define OWL_DMAX_NEXT_DESCRIPTOR 0x38
#define OWL_DMAX_CURRENT_DESCRIPTOR_NUM 0x3C
#define OWL_DMAX_INT_CTL 0x40
#define OWL_DMAX_INT_STATUS 0x44
#define OWL_DMAX_CURRENT_SOURCE_POINTER 0x48
#define OWL_DMAX_CURRENT_DESTINATION_POINTER 0x4C

/* OWL_DMAX_MODE Bits */
#define OWL_DMA_MODE_TS(x) (((x) & GENMASK(5, 0)) << 0)
#define OWL_DMA_MODE_ST(x) (((x) & GENMASK(1, 0)) << 8)
#define OWL_DMA_MODE_ST_DEV OWL_DMA_MODE_ST(0)
#define OWL_DMA_MODE_ST_DCU OWL_DMA_MODE_ST(2)
#define OWL_DMA_MODE_ST_SRAM OWL_DMA_MODE_ST(3)
#define OWL_DMA_MODE_DT(x) (((x) & GENMASK(1, 0)) << 10)
#define OWL_DMA_MODE_DT_DEV OWL_DMA_MODE_DT(0)
#define OWL_DMA_MODE_DT_DCU OWL_DMA_MODE_DT(2)
#define OWL_DMA_MODE_DT_SRAM OWL_DMA_MODE_DT(3)
#define OWL_DMA_MODE_SAM(x) (((x) & GENMASK(1, 0)) << 16)
#define OWL_DMA_MODE_SAM_CONST OWL_DMA_MODE_SAM(0)
#define OWL_DMA_MODE_SAM_INC OWL_DMA_MODE_SAM(1)
#define OWL_DMA_MODE_SAM_STRIDE OWL_DMA_MODE_SAM(2)
#define OWL_DMA_MODE_DAM(x) (((x) & GENMASK(1, 0)) << 18)
#define OWL_DMA_MODE_DAM_CONST OWL_DMA_MODE_DAM(0)
#define OWL_DMA_MODE_DAM_INC OWL_DMA_MODE_DAM(1)
#define OWL_DMA_MODE_DAM_STRIDE OWL_DMA_MODE_DAM(2)
#define OWL_DMA_MODE_PW(x) (((x) & GENMASK(2, 0)) << 20)
#define OWL_DMA_MODE_CB BIT(23)
#define OWL_DMA_MODE_NDDBW(x) (((x) & 0x1) << 28)
#define OWL_DMA_MODE_NDDBW_32BIT OWL_DMA_MODE_NDDBW(0)
#define OWL_DMA_MODE_NDDBW_8BIT OWL_DMA_MODE_NDDBW(1)
#define OWL_DMA_MODE_CFE BIT(29)
#define OWL_DMA_MODE_LME BIT(30)
#define OWL_DMA_MODE_CME BIT(31)

/* OWL_DMAX_LINKLIST_CTL Bits */
#define OWL_DMA_LLC_SAV(x) (((x) & GENMASK(1, 0)) << 8)
#define OWL_DMA_LLC_SAV_INC OWL_DMA_LLC_SAV(0)
#define OWL_DMA_LLC_SAV_LOAD_NEXT OWL_DMA_LLC_SAV(1)
#define OWL_DMA_LLC_SAV_LOAD_PREV OWL_DMA_LLC_SAV(2)
#define OWL_DMA_LLC_DAV(x) (((x) & GENMASK(1, 0)) << 10)
#define OWL_DMA_LLC_DAV_INC OWL_DMA_LLC_DAV(0)
#define OWL_DMA_LLC_DAV_LOAD_NEXT OWL_DMA_LLC_DAV(1)
#define OWL_DMA_LLC_DAV_LOAD_PREV OWL_DMA_LLC_DAV(2)
#define OWL_DMA_LLC_SUSPEND BIT(16)

/* OWL_DMAX_INT_CTL Bits */
#define OWL_DMA_INTCTL_BLOCK BIT(0)
#define OWL_DMA_INTCTL_SUPER_BLOCK BIT(1)
#define OWL_DMA_INTCTL_FRAME BIT(2)
#define OWL_DMA_INTCTL_HALF_FRAME BIT(3)
#define OWL_DMA_INTCTL_LAST_FRAME BIT(4)

/* OWL_DMAX_INT_STATUS Bits */
#define OWL_DMA_INTSTAT_BLOCK BIT(0)
#define OWL_DMA_INTSTAT_SUPER_BLOCK BIT(1)
#define OWL_DMA_INTSTAT_FRAME BIT(2)
#define OWL_DMA_INTSTAT_HALF_FRAME BIT(3)
#define OWL_DMA_INTSTAT_LAST_FRAME BIT(4)
/*
 * Extract a 'width'-bit field of 'val' starting at bit 'shift' and place it
 * at bit 'newshift'
 */
#define BIT_FIELD(val, width, shift, newshift) \
		((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))
/**
 * struct owl_dma_lli_hw - Hardware link list for dma transfer
 * @next_lli: physical address of the next link list
 * @saddr: source physical address
 * @daddr: destination physical address
 * @flen: frame length
 * @fcnt: frame count
 * @src_stride: source stride
 * @dst_stride: destination stride
 * @ctrla: dma_mode and linklist ctrl config
 * @ctrlb: interrupt config
 * @const_num: data for constant fill
 */
struct owl_dma_lli_hw {
	u32 next_lli;
	u32 saddr;
	u32 daddr;
	u32 flen:20;
	u32 fcnt:12;
	u32 src_stride;
	u32 dst_stride;
	u32 ctrla;
	u32 ctrlb;
	u32 const_num;
};

/**
 * struct owl_dma_lli - Link list for dma transfer
 * @hw: hardware link list
 * @phys: physical address of hardware link list
 * @node: node for txd's lli_list
 */
struct owl_dma_lli {
	struct owl_dma_lli_hw hw;
	dma_addr_t phys;
	struct list_head node;
};

/**
 * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @lli_list: link list of lli nodes
 * @cyclic: flag to indicate cyclic transfers
 */
struct owl_dma_txd {
	struct virt_dma_desc vd;
	struct list_head lli_list;
	bool cyclic;
};

/**
 * struct owl_dma_pchan - Holder for the physical channels
 * @id: physical index to this channel
 * @base: virtual memory base for the dma channel
 * @vchan: the virtual channel currently being served by this physical channel
 * @lock: a lock to use when altering an instance of this struct
 */
struct owl_dma_pchan {
	u32 id;
	void __iomem *base;
	struct owl_dma_vchan *vchan;
	spinlock_t lock;
};
/**
 * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @pchan: the physical channel utilized by this channel
 * @txd: active transaction on this channel
 * @cfg: slave configuration for this channel
 * @drq: physical DMA request ID for this channel
 */
struct owl_dma_vchan {
	struct virt_dma_chan vc;
	struct owl_dma_pchan *pchan;
	struct owl_dma_txd *txd;
	struct dma_slave_config cfg;
	u8 drq;
};
/**
 * struct owl_dma - Holder for the Owl DMA controller
 * @dma: dma engine for this instance
 * @base: virtual memory base for the DMA controller
 * @clk: clock for the DMA controller
 * @lock: a lock to use when changing the DMA controller global registers
 * @lli_pool: a pool for the LLI descriptors
 * @irq: interrupt ID for the DMA controller
 * @nr_pchans: the number of physical channels
 * @pchans: array of data for the physical channels
 * @nr_vchans: the number of virtual channels
 * @vchans: array of data for the virtual channels
 */
struct owl_dma {
	struct dma_device dma;
	void __iomem *base;
	struct clk *clk;
	spinlock_t lock;
	struct dma_pool *lli_pool;
	int irq;
	unsigned int nr_pchans;
	struct owl_dma_pchan *pchans;
	unsigned int nr_vchans;
	struct owl_dma_vchan *vchans;
};
static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
			 u32 val, bool state)
{
	u32 regval;

	regval = readl(pchan->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
{
	writel(data, pchan->base + reg);
}

static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg)
{
	return readl(pchan->base + reg);
}
static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
{
	u32 regval;

	regval = readl(od->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	writel(regval, od->base + reg);
}
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
{
	writel(data, od->base + reg);
}

static u32 dma_readl(struct owl_dma *od, u32 reg)
{
	return readl(od->base + reg);
}

static inline struct owl_dma *to_owl_dma(struct dma_device *dd)
{
	return container_of(dd, struct owl_dma, dma);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct owl_dma_vchan, vc.chan);
}

static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct owl_dma_txd, vd.tx);
}
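
/*
 * Repack the relevant OWL_DMAX_MODE and OWL_DMAX_LINKLIST_CTL bits into the
 * layout of the ctrla word of the hardware link-list descriptor.
 */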
static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl)
{
	u32 ctl;

	ctl = BIT_FIELD(mode, 4, 28, 28) |
	      BIT_FIELD(mode, 8, 16, 20) |
	      BIT_FIELD(mode, 4, 8, 16) |
	      BIT_FIELD(mode, 6, 0, 10) |
	      BIT_FIELD(llc_ctl, 2, 10, 8) |
	      BIT_FIELD(llc_ctl, 2, 8, 6);

	return ctl;
}
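
/* Place the OWL_DMA_INTCTL_* bits into the descriptor's ctrlb word */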
static inline u32 llc_hw_ctrlb(u32 int_ctl)
{
	u32 ctl;

	ctl = BIT_FIELD(int_ctl, 7, 0, 18);

	return ctl;
}

static void owl_dma_free_lli(struct owl_dma *od,
			     struct owl_dma_lli *lli)
{
	list_del(&lli->node);
	dma_pool_free(od->lli_pool, lli, lli->phys);
}

static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
{
	struct owl_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys);
	if (!lli)
		return NULL;

	INIT_LIST_HEAD(&lli->node);
	lli->phys = phys;

	return lli;
}
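
/*
 * Queue 'next' on the descriptor's lli_list (unless it closes a cyclic
 * chain) and, if 'prev' exists, chain it after 'prev' by pointing
 * prev->next_lli at it and enabling link-list mode in prev's ctrla.
 */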
static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
					   struct owl_dma_lli *prev,
					   struct owl_dma_lli *next,
					   bool is_cyclic)
{
	if (!is_cyclic)
		list_add_tail(&next->node, &txd->lli_list);

	if (prev) {
		prev->hw.next_lli = next->phys;
		prev->hw.ctrla |= llc_hw_ctrla(OWL_DMA_MODE_LME, 0);
	}

	return next;
}

static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
				  struct owl_dma_lli *lli,
				  dma_addr_t src, dma_addr_t dst,
				  u32 len, enum dma_transfer_direction dir,
				  struct dma_slave_config *sconfig,
				  bool is_cyclic)
{
	struct owl_dma_lli_hw *hw = &lli->hw;
	u32 mode;

	mode = OWL_DMA_MODE_PW(0);

	switch (dir) {
	case DMA_MEM_TO_MEM:
		mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU |
			OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC |
			OWL_DMA_MODE_DAM_INC;
		break;
	case DMA_MEM_TO_DEV:
		mode |= OWL_DMA_MODE_TS(vchan->drq)
			| OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV
			| OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST;

		/*
		 * Hardware only supports 32bit and 8bit buswidth. Since the
		 * default is 32bit, select 8bit only when requested.
		 */
		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			mode |= OWL_DMA_MODE_NDDBW_8BIT;

		break;
	case DMA_DEV_TO_MEM:
		mode |= OWL_DMA_MODE_TS(vchan->drq)
			| OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU
			| OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC;

		/*
		 * Hardware only supports 32bit and 8bit buswidth. Since the
		 * default is 32bit, select 8bit only when requested.
		 */
		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			mode |= OWL_DMA_MODE_NDDBW_8BIT;

		break;
	default:
		return -EINVAL;
	}

	hw->next_lli = 0; /* One link list by default */
	hw->saddr = src;
	hw->daddr = dst;

	hw->fcnt = 1; /* Frame count fixed as 1 */
	hw->flen = len; /* Max frame length is 1MB */
	hw->src_stride = 0;
	hw->dst_stride = 0;

	hw->ctrla = llc_hw_ctrla(mode,
				 OWL_DMA_LLC_SAV_LOAD_NEXT |
				 OWL_DMA_LLC_DAV_LOAD_NEXT);

	if (is_cyclic)
		hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK);
	else
		hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);

	return 0;
}
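
/* Find an idle physical channel and claim it for the given virtual channel */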
static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
					       struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];

		spin_lock_irqsave(&pchan->lock, flags);
		if (!pchan->vchan) {
			pchan->vchan = vchan;
			spin_unlock_irqrestore(&pchan->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&pchan->lock, flags);
	}

	return pchan;
}
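
/* OWL_DMA_IDLE_STAT has one bit per pchan, set while that channel is idle */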
static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan)
{
	unsigned int val;

	val = dma_readl(od, OWL_DMA_IDLE_STAT);

	return !(val & (1 << pchan->id));
}
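
/* Stop the channel, clear and disable its interrupts, and detach its vchan */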
static void owl_dma_terminate_pchan(struct owl_dma *od,
				    struct owl_dma_pchan *pchan)
{
	unsigned long flags;
	u32 irq_pd;

	pchan_writel(pchan, OWL_DMAX_START, 0);
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);
	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false);

	irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0);
	if (irq_pd & (1 << pchan->id)) {
		dev_warn(od->dma.dev,
			 "terminating pchan %d that still has pending irq\n",
			 pchan->id);
		dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id));
	}

	pchan->vchan = NULL;

	spin_unlock_irqrestore(&od->lock, flags);
}
static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan)
{
	pchan_writel(pchan, OWL_DMAX_PAUSE, 1);
}

static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan)
{
	pchan_writel(pchan, OWL_DMAX_PAUSE, 0);
}
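
/*
 * Program the physical channel with the first link-list node of the next
 * queued descriptor on this vchan and start the transfer.
 */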
static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc);
	struct owl_dma_pchan *pchan = vchan->pchan;
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
	struct owl_dma_lli *lli;
	unsigned long flags;
	u32 int_ctl;

	list_del(&vd->node);

	vchan->txd = txd;

	/* Wait for channel inactive */
	while (owl_dma_pchan_busy(od, pchan))
		cpu_relax();

	lli = list_first_entry(&txd->lli_list,
			       struct owl_dma_lli, node);

	if (txd->cyclic)
		int_ctl = OWL_DMA_INTCTL_BLOCK;
	else
		int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;

	pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
	pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
		     OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT);
	pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys);
	pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl);

	/* Clear IRQ status for this pchan */
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);

	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true);

	spin_unlock_irqrestore(&od->lock, flags);

	dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id);

	/* Start DMA transfer for this pchan */
	pchan_writel(pchan, OWL_DMAX_START, 0x1);

	return 0;
}
static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan)
{
	/* Ensure that the physical channel is stopped */
	owl_dma_terminate_pchan(od, vchan->pchan);

	vchan->pchan = NULL;
}

static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
{
	struct owl_dma *od = dev_id;
	struct owl_dma_vchan *vchan;
	struct owl_dma_pchan *pchan;
	unsigned long pending;
	int i;
	unsigned int global_irq_pending, chan_irq_pending;

	spin_lock(&od->lock);

	pending = dma_readl(od, OWL_DMA_IRQ_PD0);

	/* Clear IRQ status for each pchan */
	for_each_set_bit(i, &pending, od->nr_pchans) {
		pchan = &od->pchans[i];
		pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
	}

	/* Clear pending IRQ */
	dma_writel(od, OWL_DMA_IRQ_PD0, pending);

	/* Check missed pending IRQ */
	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];
		chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) &
				   pchan_readl(pchan, OWL_DMAX_INT_STATUS);

		/* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */
		dma_readl(od, OWL_DMA_IRQ_PD0);

		global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);

		if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
			dev_dbg(od->dma.dev,
				"global and channel IRQ pending match err\n");

			/* Clear IRQ status for this pchan */
			pchan_update(pchan, OWL_DMAX_INT_STATUS,
				     0xff, false);

			/* Update global IRQ pending */
			pending |= BIT(i);
		}
	}

	spin_unlock(&od->lock);

	for_each_set_bit(i, &pending, od->nr_pchans) {
		struct owl_dma_txd *txd;

		pchan = &od->pchans[i];

		vchan = pchan->vchan;
		if (!vchan) {
			dev_warn(od->dma.dev, "no vchan attached on pchan %d\n",
				 pchan->id);
			continue;
		}

		spin_lock(&vchan->vc.lock);

		txd = vchan->txd;
		if (txd) {
			vchan->txd = NULL;

			vchan_cookie_complete(&txd->vd);

			/*
			 * Start the next descriptor (if any),
			 * otherwise free this channel.
			 */
			if (vchan_next_desc(&vchan->vc))
				owl_dma_start_next_txd(vchan);
			else
				owl_dma_phy_free(od, vchan);
		}

		spin_unlock(&vchan->vc.lock);
	}

	return IRQ_HANDLED;
}

static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd)
{
	struct owl_dma_lli *lli, *_lli;

	if (unlikely(!txd))
		return;

	list_for_each_entry_safe(lli, _lli, &txd->lli_list, node)
		owl_dma_free_lli(od, lli);

	kfree(txd);
}

static void owl_dma_desc_free(struct virt_dma_desc *vd)
{
	struct owl_dma *od = to_owl_dma(vd->tx.chan->device);
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);

	owl_dma_free_txd(od, txd);
}

static int owl_dma_terminate_all(struct dma_chan *chan)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan->pchan)
		owl_dma_phy_free(od, vchan);

	if (vchan->txd) {
		owl_dma_desc_free(&vchan->txd->vd);
		vchan->txd = NULL;
	}

	vchan_get_all_descriptors(&vchan->vc, &head);
	vchan_dma_desc_free_list(&vchan->vc, &head);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

static int owl_dma_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config));

	return 0;
}

static int owl_dma_pause(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	owl_dma_pause_pchan(vchan->pchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

static int owl_dma_resume(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	if (!vchan->pchan && !vchan->txd)
		return 0;

	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	owl_dma_resume_pchan(vchan->pchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}
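
/*
 * Residue for the descriptor currently running on this vchan's physical
 * channel, derived from the channel's remain count plus the frame lengths
 * recorded in the descriptor's link list.
 */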
static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli;
	unsigned int next_lli_phy;
	size_t bytes;

	pchan = vchan->pchan;
	txd = vchan->txd;

	if (!pchan || !txd)
		return 0;

	/* Get remain count of current node in link list */
	bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT);

	/* Loop through the preceding nodes to get total remaining bytes */
	if (pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) {
		next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR);
		list_for_each_entry(lli, &txd->lli_list, node) {
			/* Start from the next active node */
			if (lli->phys == next_lli_phy) {
				list_for_each_entry(lli, &txd->lli_list, node)
					bytes += lli->hw.flen;
				break;
			}
		}
	}

	return bytes;
}

static enum dma_status owl_dma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *state)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct owl_dma_lli *lli;
	struct virt_dma_desc *vd;
	struct owl_dma_txd *txd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE || !state)
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	vd = vchan_find_desc(&vchan->vc, cookie);
	if (vd) {
		txd = to_owl_txd(&vd->tx);
		list_for_each_entry(lli, &txd->lli_list, node)
			bytes += lli->hw.flen;
	} else {
		bytes = owl_dma_getbytes_chan(vchan);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	dma_set_residue(state, bytes);

	return ret;
}
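
/* Grab a free physical channel for this vchan and start its first descriptor */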
static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	struct owl_dma_pchan *pchan;

	pchan = owl_dma_get_pchan(od, vchan);
	if (!pchan)
		return;

	dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id);

	vchan->pchan = pchan;
	owl_dma_start_next_txd(vchan);
}

static void owl_dma_issue_pending(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	if (vchan_issue_pending(&vchan->vc)) {
		if (!vchan->pchan)
			owl_dma_phy_alloc_and_start(vchan);
	}
	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

static struct dma_async_tx_descriptor
		*owl_dma_prep_memcpy(struct dma_chan *chan,
				     dma_addr_t dst, dma_addr_t src,
				     size_t len, unsigned long flags)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL;
	size_t offset, bytes;
	int ret;

	if (!len)
		return NULL;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);

	/* Process the transfer as frame by frame */
	for (offset = 0; offset < len; offset += bytes) {
		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_warn(chan2dev(chan), "failed to allocate lli\n");
			goto err_txd_free;
		}

		bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);

		ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
				      bytes, DMA_MEM_TO_MEM,
				      &vchan->cfg, txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli\n");
			goto err_txd_free;
		}

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);
	return NULL;
}

static struct dma_async_tx_descriptor
		*owl_dma_prep_slave_sg(struct dma_chan *chan,
				       struct scatterlist *sgl,
				       unsigned int sg_len,
				       enum dma_transfer_direction dir,
				       unsigned long flags, void *context)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	size_t len;
	int ret, i;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if (len > OWL_DMA_FRAME_MAX_LENGTH) {
			dev_err(od->dma.dev,
				"frame length exceeds max supported length");
			goto err_txd_free;
		}

		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_err(chan2dev(chan), "failed to allocate lli");
			goto err_txd_free;
		}

		if (dir == DMA_MEM_TO_DEV) {
			src = addr;
			dst = sconfig->dst_addr;
		} else {
			src = sconfig->src_addr;
			dst = addr;
		}

		ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig,
				      txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli");
			goto err_txd_free;
		}

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);

	return NULL;
}

static struct dma_async_tx_descriptor
		*owl_prep_dma_cyclic(struct dma_chan *chan,
				     dma_addr_t buf_addr, size_t buf_len,
				     size_t period_len,
				     enum dma_transfer_direction dir,
				     unsigned long flags)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL, *first = NULL;
	dma_addr_t src = 0, dst = 0;
	unsigned int periods = buf_len / period_len;
	int ret, i;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);
	txd->cyclic = true;

	for (i = 0; i < periods; i++) {
		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_warn(chan2dev(chan), "failed to allocate lli");
			goto err_txd_free;
		}

		if (dir == DMA_MEM_TO_DEV) {
			src = buf_addr + (period_len * i);
			dst = sconfig->dst_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = sconfig->src_addr;
			dst = buf_addr + (period_len * i);
		}

		ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len,
				      dir, sconfig, txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli");
			goto err_txd_free;
		}

		if (!first)
			first = lli;

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	/* close the cyclic list */
	owl_dma_add_lli(txd, prev, first, true);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);

	return NULL;
}

static void owl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);

	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(&vchan->vc);
}

static inline void owl_dma_free(struct owl_dma *od)
{
	struct owl_dma_vchan *vchan = NULL;
	struct owl_dma_vchan *next;

	list_for_each_entry_safe(vchan,
				 next, &od->dma.channels, vc.chan.device_node) {
		list_del(&vchan->vc.chan.device_node);
		tasklet_kill(&vchan->vc.task);
	}
}
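
/*
 * DT xlate: the first cell of the DMA specifier carries the DRQ (request
 * line) number; any free channel is handed out and tagged with that DRQ.
 */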
static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct owl_dma *od = ofdma->of_dma_data;
	struct owl_dma_vchan *vchan;
	struct dma_chan *chan;
	u8 drq = dma_spec->args[0];

	if (drq > od->nr_vchans)
		return NULL;

	chan = dma_get_any_slave_channel(&od->dma);
	if (!chan)
		return NULL;

	vchan = to_owl_vchan(chan);
	vchan->drq = drq;

	return chan;
}
static int owl_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct owl_dma *od;
	struct resource *res;
	int ret, i, nr_channels, nr_requests;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	od->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	ret = of_property_read_u32(np, "dma-channels", &nr_channels);
	if (ret) {
		dev_err(&pdev->dev, "can't get dma-channels\n");
		return ret;
	}

	ret = of_property_read_u32(np, "dma-requests", &nr_requests);
	if (ret) {
		dev_err(&pdev->dev, "can't get dma-requests\n");
		return ret;
	}

	dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
		 nr_channels, nr_requests);

	od->nr_pchans = nr_channels;
	od->nr_vchans = nr_requests;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	platform_set_drvdata(pdev, od);
	spin_lock_init(&od->lock);

	dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);

	od->dma.dev = &pdev->dev;
	od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
	od->dma.device_tx_status = owl_dma_tx_status;
	od->dma.device_issue_pending = owl_dma_issue_pending;
	od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
	od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
	od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
	od->dma.device_config = owl_dma_config;
	od->dma.device_pause = owl_dma_pause;
	od->dma.device_resume = owl_dma_resume;
	od->dma.device_terminate_all = owl_dma_terminate_all;
	od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->dma.directions = BIT(DMA_MEM_TO_MEM);
	od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	INIT_LIST_HEAD(&od->dma.channels);

	od->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(od->clk)) {
		dev_err(&pdev->dev, "unable to get clock\n");
		return PTR_ERR(od->clk);
	}
	/*
	 * Even though the DMA controller is capable of generating 4
	 * IRQs for the DMA priority feature, we only use 1 IRQ for
	 * simplicity.
	 */
	od->irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
			       dev_name(&pdev->dev), od);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ\n");
		return ret;
	}

	/* Init physical channel */
	od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
				  sizeof(struct owl_dma_pchan), GFP_KERNEL);
	if (!od->pchans)
		return -ENOMEM;

	for (i = 0; i < od->nr_pchans; i++) {
		struct owl_dma_pchan *pchan = &od->pchans[i];

		pchan->id = i;
		pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
	}

	/* Init virtual channel */
	od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
				  sizeof(struct owl_dma_vchan), GFP_KERNEL);
	if (!od->vchans)
		return -ENOMEM;

	for (i = 0; i < od->nr_vchans; i++) {
		struct owl_dma_vchan *vchan = &od->vchans[i];

		vchan->vc.desc_free = owl_dma_desc_free;
		vchan_init(&vchan->vc, &od->dma);
	}

	/* Create a pool of consistent memory blocks for hardware descriptors */
	od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
				       sizeof(struct owl_dma_lli),
				       __alignof__(struct owl_dma_lli),
				       0);
	if (!od->lli_pool) {
		dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
		return -ENOMEM;
	}

	clk_prepare_enable(od->clk);

	ret = dma_async_device_register(&od->dma);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DMA engine device\n");
		goto err_pool_free;
	}

	/* Device-tree DMA controller registration */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 owl_dma_of_xlate, od);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&od->dma);
err_pool_free:
	clk_disable_unprepare(od->clk);
	dma_pool_destroy(od->lli_pool);

	return ret;
}
static int owl_dma_remove(struct platform_device *pdev)
{
	struct owl_dma *od = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&od->dma);

	/* Mask all interrupts for this execution environment */
	dma_writel(od, OWL_DMA_IRQ_EN0, 0x0);

	/* Make sure we won't have any further interrupts */
	devm_free_irq(od->dma.dev, od->irq, od);

	owl_dma_free(od);

	clk_disable_unprepare(od->clk);

	return 0;
}

static const struct of_device_id owl_dma_match[] = {
	{ .compatible = "actions,s900-dma", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, owl_dma_match);

static struct platform_driver owl_dma_driver = {
	.probe = owl_dma_probe,
	.remove = owl_dma_remove,
	.driver = {
		.name = "dma-owl",
		.of_match_table = of_match_ptr(owl_dma_match),
	},
};

static int owl_dma_init(void)
{
	return platform_driver_register(&owl_dma_driver);
}
subsys_initcall(owl_dma_init);

static void __exit owl_dma_exit(void)
{
	platform_driver_unregister(&owl_dma_driver);
}
module_exit(owl_dma_exit);

MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver");
MODULE_LICENSE("GPL");