
/*
 * Copyright (C) 2017 Spreadtrum Communications Inc.
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dma/sprd-dma.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define SPRD_DMA_CHN_REG_OFFSET 0x1000
#define SPRD_DMA_CHN_REG_LENGTH 0x40
#define SPRD_DMA_MEMCPY_MIN_SIZE 64

/* DMA global registers definition */
#define SPRD_DMA_GLB_PAUSE 0x0
#define SPRD_DMA_GLB_FRAG_WAIT 0x4
#define SPRD_DMA_GLB_REQ_PEND0_EN 0x8
#define SPRD_DMA_GLB_REQ_PEND1_EN 0xc
#define SPRD_DMA_GLB_INT_RAW_STS 0x10
#define SPRD_DMA_GLB_INT_MSK_STS 0x14
#define SPRD_DMA_GLB_REQ_STS 0x18
#define SPRD_DMA_GLB_CHN_EN_STS 0x1c
#define SPRD_DMA_GLB_DEBUG_STS 0x20
#define SPRD_DMA_GLB_ARB_SEL_STS 0x24
#define SPRD_DMA_GLB_REQ_UID(uid) (0x4 * ((uid) - 1))
#define SPRD_DMA_GLB_REQ_UID_OFFSET 0x2000
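
/*
 * Example (an illustration of the two macros above): for hardware request
 * UID 3, the per-UID register sits at glb_base + SPRD_DMA_GLB_REQ_UID_OFFSET
 * + SPRD_DMA_GLB_REQ_UID(3) = glb_base + 0x2000 + 0x8. sprd_dma_set_uid()
 * below writes the channel number + 1 there to route that request onto a
 * channel, and sprd_dma_unset_uid() writes 0 to detach it.
 */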

/* DMA channel registers definition */
#define SPRD_DMA_CHN_PAUSE 0x0
#define SPRD_DMA_CHN_REQ 0x4
#define SPRD_DMA_CHN_CFG 0x8
#define SPRD_DMA_CHN_INTC 0xc
#define SPRD_DMA_CHN_SRC_ADDR 0x10
#define SPRD_DMA_CHN_DES_ADDR 0x14
#define SPRD_DMA_CHN_FRG_LEN 0x18
#define SPRD_DMA_CHN_BLK_LEN 0x1c
#define SPRD_DMA_CHN_TRSC_LEN 0x20
#define SPRD_DMA_CHN_TRSF_STEP 0x24
#define SPRD_DMA_CHN_WARP_PTR 0x28
#define SPRD_DMA_CHN_WARP_TO 0x2c
#define SPRD_DMA_CHN_LLIST_PTR 0x30
#define SPRD_DMA_CHN_FRAG_STEP 0x34
#define SPRD_DMA_CHN_SRC_BLK_STEP 0x38
#define SPRD_DMA_CHN_DES_BLK_STEP 0x3c

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_INT_MASK GENMASK(4, 0)
#define SPRD_DMA_INT_CLR_OFFSET 24
#define SPRD_DMA_FRAG_INT_EN BIT(0)
#define SPRD_DMA_BLK_INT_EN BIT(1)
#define SPRD_DMA_TRANS_INT_EN BIT(2)
#define SPRD_DMA_LIST_INT_EN BIT(3)
#define SPRD_DMA_CFG_ERR_INT_EN BIT(4)

/* SPRD_DMA_CHN_CFG register definition */
#define SPRD_DMA_CHN_EN BIT(0)
#define SPRD_DMA_LINKLIST_EN BIT(4)
#define SPRD_DMA_WAIT_BDONE_OFFSET 24
#define SPRD_DMA_DONOT_WAIT_BDONE 1

/* SPRD_DMA_CHN_REQ register definition */
#define SPRD_DMA_REQ_EN BIT(0)

/* SPRD_DMA_CHN_PAUSE register definition */
#define SPRD_DMA_PAUSE_EN BIT(0)
#define SPRD_DMA_PAUSE_STS BIT(2)
#define SPRD_DMA_PAUSE_CNT 0x2000

/* DMA_CHN_WARP_* register definition */
#define SPRD_DMA_HIGH_ADDR_MASK GENMASK(31, 28)
#define SPRD_DMA_LOW_ADDR_MASK GENMASK(31, 0)
#define SPRD_DMA_HIGH_ADDR_OFFSET 4

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_FRAG_INT_STS BIT(16)
#define SPRD_DMA_BLK_INT_STS BIT(17)
#define SPRD_DMA_TRSC_INT_STS BIT(18)
#define SPRD_DMA_LIST_INT_STS BIT(19)
#define SPRD_DMA_CFGERR_INT_STS BIT(20)
#define SPRD_DMA_CHN_INT_STS \
        (SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS | \
         SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS | \
         SPRD_DMA_CFGERR_INT_STS)

/* SPRD_DMA_CHN_FRG_LEN register definition */
#define SPRD_DMA_SRC_DATAWIDTH_OFFSET 30
#define SPRD_DMA_DES_DATAWIDTH_OFFSET 28
#define SPRD_DMA_SWT_MODE_OFFSET 26
#define SPRD_DMA_REQ_MODE_OFFSET 24
#define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0)
#define SPRD_DMA_FIX_SEL_OFFSET 21
#define SPRD_DMA_FIX_EN_OFFSET 20
#define SPRD_DMA_LLIST_END BIT(19)
#define SPRD_DMA_FRG_LEN_MASK GENMASK(16, 0)

/* SPRD_DMA_CHN_BLK_LEN register definition */
#define SPRD_DMA_BLK_LEN_MASK GENMASK(16, 0)

/* SPRD_DMA_CHN_TRSC_LEN register definition */
#define SPRD_DMA_TRSC_LEN_MASK GENMASK(27, 0)

/* SPRD_DMA_CHN_TRSF_STEP register definition */
#define SPRD_DMA_DEST_TRSF_STEP_OFFSET 16
#define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0
#define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0)

/* define the DMA transfer step type */
#define SPRD_DMA_NONE_STEP 0
#define SPRD_DMA_BYTE_STEP 1
#define SPRD_DMA_SHORT_STEP 2
#define SPRD_DMA_WORD_STEP 4
#define SPRD_DMA_DWORD_STEP 8

#define SPRD_DMA_SOFTWARE_UID 0

/* dma data width values */
enum sprd_dma_datawidth {
        SPRD_DMA_DATAWIDTH_1_BYTE,
        SPRD_DMA_DATAWIDTH_2_BYTES,
        SPRD_DMA_DATAWIDTH_4_BYTES,
        SPRD_DMA_DATAWIDTH_8_BYTES,
};
/* dma channel hardware configuration */
struct sprd_dma_chn_hw {
        u32 pause;
        u32 req;
        u32 cfg;
        u32 intc;
        u32 src_addr;
        u32 des_addr;
        u32 frg_len;
        u32 blk_len;
        u32 trsc_len;
        u32 trsf_step;
        u32 wrap_ptr;
        u32 wrap_to;
        u32 llist_ptr;
        u32 frg_step;
        u32 src_blk_step;
        u32 des_blk_step;
};

/* dma request description */
struct sprd_dma_desc {
        struct virt_dma_desc vd;
        struct sprd_dma_chn_hw chn_hw;
};

/* dma channel description */
struct sprd_dma_chn {
        struct virt_dma_chan vc;
        void __iomem *chn_base;
        struct sprd_dma_linklist linklist;
        struct dma_slave_config slave_cfg;
        u32 chn_num;
        u32 dev_id;
        struct sprd_dma_desc *cur_desc;
};

/* SPRD dma device */
struct sprd_dma_dev {
        struct dma_device dma_dev;
        void __iomem *glb_base;
        struct clk *clk;
        struct clk *ashb_clk;
        int irq;
        u32 total_chns;
        struct sprd_dma_chn channels[];
};
static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info sprd_dma_info = {
        .filter_fn = sprd_dma_filter_fn,
};

static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct sprd_dma_chn, vc.chan);
}

static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(c);

        return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]);
}

static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd)
{
        return container_of(vd, struct sprd_dma_desc, vd);
}

static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg,
                                u32 mask, u32 val)
{
        u32 orig = readl(schan->chn_base + reg);
        u32 tmp;

        tmp = (orig & ~mask) | val;
        writel(tmp, schan->chn_base + reg);
}
static int sprd_dma_enable(struct sprd_dma_dev *sdev)
{
        int ret;

        ret = clk_prepare_enable(sdev->clk);
        if (ret)
                return ret;

        /*
         * The ashb_clk is optional and only used by the AGCP DMA controller,
         * so check whether it was found before enabling it.
         */
        if (!IS_ERR(sdev->ashb_clk))
                ret = clk_prepare_enable(sdev->ashb_clk);

        return ret;
}

static void sprd_dma_disable(struct sprd_dma_dev *sdev)
{
        clk_disable_unprepare(sdev->clk);

        /* Likewise, only disable the optional ashb_clk if it was found. */
        if (!IS_ERR(sdev->ashb_clk))
                clk_disable_unprepare(sdev->ashb_clk);
}
static void sprd_dma_set_uid(struct sprd_dma_chn *schan)
{
        struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
        u32 dev_id = schan->dev_id;

        if (dev_id != SPRD_DMA_SOFTWARE_UID) {
                u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
                                 SPRD_DMA_GLB_REQ_UID(dev_id);

                writel(schan->chn_num + 1, sdev->glb_base + uid_offset);
        }
}

static void sprd_dma_unset_uid(struct sprd_dma_chn *schan)
{
        struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
        u32 dev_id = schan->dev_id;

        if (dev_id != SPRD_DMA_SOFTWARE_UID) {
                u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
                                 SPRD_DMA_GLB_REQ_UID(dev_id);

                writel(0, sdev->glb_base + uid_offset);
        }
}

static void sprd_dma_clear_int(struct sprd_dma_chn *schan)
{
        sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC,
                            SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET,
                            SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET);
}

static void sprd_dma_enable_chn(struct sprd_dma_chn *schan)
{
        sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN,
                            SPRD_DMA_CHN_EN);
}

static void sprd_dma_disable_chn(struct sprd_dma_chn *schan)
{
        sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0);
}

static void sprd_dma_soft_request(struct sprd_dma_chn *schan)
{
        sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN,
                            SPRD_DMA_REQ_EN);
}

static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable)
{
        struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
        u32 pause, timeout = SPRD_DMA_PAUSE_CNT;

        if (enable) {
                sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
                                    SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN);

                do {
                        pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE);
                        if (pause & SPRD_DMA_PAUSE_STS)
                                break;

                        cpu_relax();
                } while (--timeout > 0);

                if (!timeout)
                        dev_warn(sdev->dma_dev.dev,
                                 "pause dma controller timeout\n");
        } else {
                sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
                                    SPRD_DMA_PAUSE_EN, 0);
        }
}

static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan)
{
        u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG);

        if (!(cfg & SPRD_DMA_CHN_EN))
                return;

        sprd_dma_pause_resume(schan, true);
        sprd_dma_disable_chn(schan);
}

static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan)
{
        unsigned long addr, addr_high;

        addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
        addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) &
                    SPRD_DMA_HIGH_ADDR_MASK;

        return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}
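
/*
 * A worked example of the 36-bit address split used above (values chosen
 * for illustration): a destination of 0x2_3000_0000 is programmed as
 * des_addr = 0x30000000 with bits 31:28 of the wrap register holding 0x2,
 * and this helper reassembles it as 0x30000000 | (0x20000000 << 4).
 */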

static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan)
{
        struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
        u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) &
                       SPRD_DMA_CHN_INT_STS;

        switch (intc_sts) {
        case SPRD_DMA_CFGERR_INT_STS:
                return SPRD_DMA_CFGERR_INT;

        case SPRD_DMA_LIST_INT_STS:
                return SPRD_DMA_LIST_INT;

        case SPRD_DMA_TRSC_INT_STS:
                return SPRD_DMA_TRANS_INT;

        case SPRD_DMA_BLK_INT_STS:
                return SPRD_DMA_BLK_INT;

        case SPRD_DMA_FRAG_INT_STS:
                return SPRD_DMA_FRAG_INT;

        default:
                dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n");
                return SPRD_DMA_NO_INT;
        }
}

static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan)
{
        u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN);

        return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK;
}

static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
                                    struct sprd_dma_desc *sdesc)
{
        struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw;

        writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE);
        writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG);
        writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC);
        writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
        writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
        writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
        writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN);
        writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN);
        writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP);
        writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR);
        writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO);
        writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR);
        writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP);
        writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP);
        writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP);
        writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ);
}

static void sprd_dma_start(struct sprd_dma_chn *schan)
{
        struct virt_dma_desc *vd = vchan_next_desc(&schan->vc);

        if (!vd)
                return;

        list_del(&vd->node);
        schan->cur_desc = to_sprd_dma_desc(vd);

        /*
         * Copy the DMA configuration from DMA descriptor to this hardware
         * channel.
         */
        sprd_dma_set_chn_config(schan, schan->cur_desc);
        sprd_dma_set_uid(schan);
        sprd_dma_enable_chn(schan);

        if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
                sprd_dma_soft_request(schan);
}

static void sprd_dma_stop(struct sprd_dma_chn *schan)
{
        sprd_dma_stop_and_disable(schan);
        sprd_dma_unset_uid(schan);
        sprd_dma_clear_int(schan);
}

static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc,
                                      enum sprd_dma_int_type int_type,
                                      enum sprd_dma_req_mode req_mode)
{
        if (int_type == SPRD_DMA_NO_INT)
                return false;

        return int_type >= req_mode + 1;
}
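
/*
 * The comparison above relies on the ordering of the interrupt-type and
 * request-mode enums in <linux/dma/sprd-dma.h>: a descriptor counts as done
 * when the raised interrupt type is at least one step past the configured
 * request mode. For example, a channel in block request mode completes on a
 * block or transaction interrupt, but not on a fragment interrupt.
 */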

static irqreturn_t dma_irq_handle(int irq, void *dev_id)
{
        struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id;
        u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS);
        struct sprd_dma_chn *schan;
        struct sprd_dma_desc *sdesc;
        enum sprd_dma_req_mode req_type;
        enum sprd_dma_int_type int_type;
        bool trans_done = false;
        u32 i;

        while (irq_status) {
                i = __ffs(irq_status);
                irq_status &= (irq_status - 1);
                schan = &sdev->channels[i];

                spin_lock(&schan->vc.lock);
                int_type = sprd_dma_get_int_type(schan);
                req_type = sprd_dma_get_req_type(schan);
                sprd_dma_clear_int(schan);

                sdesc = schan->cur_desc;

                /*
                 * A late interrupt can arrive after the channel was
                 * terminated, in which case there is no current descriptor
                 * to complete.
                 */
                if (!sdesc) {
                        spin_unlock(&schan->vc.lock);
                        continue;
                }

                /* Check if the dma request descriptor is done. */
                trans_done = sprd_dma_check_trans_done(sdesc, int_type,
                                                       req_type);
                if (trans_done) {
                        vchan_cookie_complete(&sdesc->vd);
                        schan->cur_desc = NULL;
                        sprd_dma_start(schan);
                }
                spin_unlock(&schan->vc.lock);
        }

        return IRQ_HANDLED;
}

static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        int ret;

        ret = pm_runtime_get_sync(chan->device->dev);
        if (ret < 0)
                return ret;

        schan->dev_id = SPRD_DMA_SOFTWARE_UID;
        return 0;
}

static void sprd_dma_free_chan_resources(struct dma_chan *chan)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&schan->vc.lock, flags);
        sprd_dma_stop(schan);
        spin_unlock_irqrestore(&schan->vc.lock, flags);

        vchan_free_chan_resources(&schan->vc);
        pm_runtime_put(chan->device->dev);
}

static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
                                          dma_cookie_t cookie,
                                          struct dma_tx_state *txstate)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        struct virt_dma_desc *vd;
        unsigned long flags;
        enum dma_status ret;
        u32 pos;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE || !txstate)
                return ret;

        spin_lock_irqsave(&schan->vc.lock, flags);
        vd = vchan_find_desc(&schan->vc, cookie);
        if (vd) {
                struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
                struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;

                if (hw->trsc_len > 0)
                        pos = hw->trsc_len;
                else if (hw->blk_len > 0)
                        pos = hw->blk_len;
                else if (hw->frg_len > 0)
                        pos = hw->frg_len;
                else
                        pos = 0;
        } else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
                pos = sprd_dma_get_dst_addr(schan);
        } else {
                pos = 0;
        }
        spin_unlock_irqrestore(&schan->vc.lock, flags);

        dma_set_residue(txstate, pos);
        return ret;
}

static void sprd_dma_issue_pending(struct dma_chan *chan)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&schan->vc.lock, flags);
        if (vchan_issue_pending(&schan->vc) && !schan->cur_desc)
                sprd_dma_start(schan);
        spin_unlock_irqrestore(&schan->vc.lock, flags);
}

static int sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth)
{
        switch (buswidth) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                return ffs(buswidth) - 1;

        default:
                return -EINVAL;
        }
}

static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
{
        switch (buswidth) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                return buswidth;

        default:
                return -EINVAL;
        }
}
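
/*
 * Both helpers map the generic dma_slave_buswidth values (1, 2, 4 or 8
 * bytes) onto this controller's encodings: the datawidth field is the log2
 * of the width (e.g. DMA_SLAVE_BUSWIDTH_4_BYTES -> ffs(4) - 1 == 2, i.e.
 * SPRD_DMA_DATAWIDTH_4_BYTES), while the address step is simply the width
 * in bytes (4 -> SPRD_DMA_WORD_STEP).
 */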

static int sprd_dma_fill_desc(struct dma_chan *chan,
                              struct sprd_dma_chn_hw *hw,
                              unsigned int sglen, int sg_index,
                              dma_addr_t src, dma_addr_t dst, u32 len,
                              enum dma_transfer_direction dir,
                              unsigned long flags,
                              struct dma_slave_config *slave_cfg)
{
        struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
        u32 int_mode = flags & SPRD_DMA_INT_MASK;
        int src_datawidth, dst_datawidth, src_step, dst_step;
        u32 temp, fix_mode = 0, fix_en = 0;

        if (dir == DMA_MEM_TO_DEV) {
                src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
                if (src_step < 0) {
                        dev_err(sdev->dma_dev.dev, "invalid source step\n");
                        return src_step;
                }
                dst_step = SPRD_DMA_NONE_STEP;
        } else {
                dst_step = sprd_dma_get_step(slave_cfg->dst_addr_width);
                if (dst_step < 0) {
                        dev_err(sdev->dma_dev.dev, "invalid destination step\n");
                        return dst_step;
                }
                src_step = SPRD_DMA_NONE_STEP;
        }

        src_datawidth = sprd_dma_get_datawidth(slave_cfg->src_addr_width);
        if (src_datawidth < 0) {
                dev_err(sdev->dma_dev.dev, "invalid source datawidth\n");
                return src_datawidth;
        }

        dst_datawidth = sprd_dma_get_datawidth(slave_cfg->dst_addr_width);
        if (dst_datawidth < 0) {
                dev_err(sdev->dma_dev.dev, "invalid destination datawidth\n");
                return dst_datawidth;
        }

        if (slave_cfg->slave_id)
                schan->dev_id = slave_cfg->slave_id;

        hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;

        /*
         * wrap_ptr and wrap_to hold the high 4 bits of the source and
         * destination addresses, respectively.
         */
        hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
        hw->wrap_to = (dst >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
        hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
        hw->des_addr = dst & SPRD_DMA_LOW_ADDR_MASK;

        /*
         * Fix mode (keeping one address constant) can only be enabled when
         * exactly one of the steps is zero. If both steps are zero or both
         * are non-zero, fix mode must stay disabled.
         */
        if ((src_step != 0 && dst_step != 0) || (src_step | dst_step) == 0) {
                fix_en = 0;
        } else {
                fix_en = 1;
                if (src_step)
                        fix_mode = 1;
                else
                        fix_mode = 0;
        }

        hw->intc = int_mode | SPRD_DMA_CFG_ERR_INT_EN;

        temp = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
        temp |= dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
        temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET;
        temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET;
        temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET;
        temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
        hw->frg_len = temp;

        hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
        hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;

        temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
        temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
        hw->trsf_step = temp;

        /* link-list configuration */
        if (schan->linklist.phy_addr) {
                if (sg_index == sglen - 1)
                        hw->frg_len |= SPRD_DMA_LLIST_END;

                hw->cfg |= SPRD_DMA_LINKLIST_EN;

                /*
                 * Link-list index. The sglen check guards the modulo against
                 * the sglen == 0 call used for the main descriptor.
                 */
                temp = sglen ? (sg_index + 1) % sglen : 0;

                /* Next link-list configuration's physical address offset */
                temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR;

                /*
                 * Point the link-list pointer at the next link-list
                 * configuration's physical address.
                 */
                hw->llist_ptr = schan->linklist.phy_addr + temp;
        } else {
                hw->llist_ptr = 0;
        }

        hw->frg_step = 0;
        hw->src_blk_step = 0;
        hw->des_blk_step = 0;
        return 0;
}

static int sprd_dma_fill_linklist_desc(struct dma_chan *chan,
                                       unsigned int sglen, int sg_index,
                                       dma_addr_t src, dma_addr_t dst, u32 len,
                                       enum dma_transfer_direction dir,
                                       unsigned long flags,
                                       struct dma_slave_config *slave_cfg)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        struct sprd_dma_chn_hw *hw;

        if (!schan->linklist.virt_addr)
                return -EINVAL;

        hw = (struct sprd_dma_chn_hw *)(schan->linklist.virt_addr +
                                        sg_index * sizeof(*hw));

        return sprd_dma_fill_desc(chan, hw, sglen, sg_index, src, dst, len,
                                  dir, flags, slave_cfg);
}
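
/*
 * Layout sketch (an illustration, not extra driver state): for an sglen of
 * 3, linklist.virt_addr holds three consecutive struct sprd_dma_chn_hw
 * entries. Entry 0 chains to entry 1, entry 1 to entry 2, and entry 2 (the
 * one marked SPRD_DMA_LLIST_END) wraps back to entry 0, because of the
 * "(sg_index + 1) % sglen" arithmetic in sprd_dma_fill_desc().
 */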

static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                         size_t len, unsigned long flags)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        struct sprd_dma_desc *sdesc;
        struct sprd_dma_chn_hw *hw;
        enum sprd_dma_datawidth datawidth;
        u32 step, temp;

        sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
        if (!sdesc)
                return NULL;

        hw = &sdesc->chn_hw;

        hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
        hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN;
        hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
        hw->des_addr = dest & SPRD_DMA_LOW_ADDR_MASK;
        hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
                       SPRD_DMA_HIGH_ADDR_MASK;
        hw->wrap_to = (dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
                      SPRD_DMA_HIGH_ADDR_MASK;

        if (IS_ALIGNED(len, 8)) {
                datawidth = SPRD_DMA_DATAWIDTH_8_BYTES;
                step = SPRD_DMA_DWORD_STEP;
        } else if (IS_ALIGNED(len, 4)) {
                datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
                step = SPRD_DMA_WORD_STEP;
        } else if (IS_ALIGNED(len, 2)) {
                datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
                step = SPRD_DMA_SHORT_STEP;
        } else {
                datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
                step = SPRD_DMA_BYTE_STEP;
        }

        temp = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
        temp |= datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
        temp |= SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET;
        temp |= len & SPRD_DMA_FRG_LEN_MASK;
        hw->frg_len = temp;

        hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
        hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;

        temp = (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
        temp |= (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
        hw->trsf_step = temp;

        return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}
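
/*
 * The alignment ladder above picks the widest transfer unit that divides
 * len: e.g. a 24-byte copy (IS_ALIGNED(24, 8)) uses 8-byte datawidth with
 * SPRD_DMA_DWORD_STEP, while a 6-byte copy falls through to the 2-byte
 * case. Note that only len is checked, which implicitly assumes src and
 * dest carry at least the same alignment.
 */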

static struct dma_async_tx_descriptor *
sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                       unsigned int sglen, enum dma_transfer_direction dir,
                       unsigned long flags, void *context)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        struct dma_slave_config *slave_cfg = &schan->slave_cfg;
        dma_addr_t src = 0, dst = 0;
        struct sprd_dma_desc *sdesc;
        struct scatterlist *sg;
        u32 len = 0;
        int ret, i;

        if (!is_slave_direction(dir))
                return NULL;

        if (context) {
                struct sprd_dma_linklist *ll_cfg =
                        (struct sprd_dma_linklist *)context;

                schan->linklist.phy_addr = ll_cfg->phy_addr;
                schan->linklist.virt_addr = ll_cfg->virt_addr;
        } else {
                schan->linklist.phy_addr = 0;
                schan->linklist.virt_addr = 0;
        }

        sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
        if (!sdesc)
                return NULL;

        for_each_sg(sgl, sg, sglen, i) {
                len = sg_dma_len(sg);

                if (dir == DMA_MEM_TO_DEV) {
                        src = sg_dma_address(sg);
                        dst = slave_cfg->dst_addr;
                } else {
                        src = slave_cfg->src_addr;
                        dst = sg_dma_address(sg);
                }

                /*
                 * The link-list mode needs at least 2 link-list
                 * configurations. If there is only one sg, it doesn't
                 * need to fill the link-list configuration.
                 */
                if (sglen < 2)
                        break;

                ret = sprd_dma_fill_linklist_desc(chan, sglen, i, src, dst, len,
                                                  dir, flags, slave_cfg);
                if (ret) {
                        kfree(sdesc);
                        return NULL;
                }
        }

        ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
                                 dir, flags, slave_cfg);
        if (ret) {
                kfree(sdesc);
                return NULL;
        }

        return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}

static int sprd_dma_slave_config(struct dma_chan *chan,
                                 struct dma_slave_config *config)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        struct dma_slave_config *slave_cfg = &schan->slave_cfg;

        if (!is_slave_direction(config->direction))
                return -EINVAL;

        memcpy(slave_cfg, config, sizeof(*config));
        return 0;
}

static int sprd_dma_pause(struct dma_chan *chan)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&schan->vc.lock, flags);
        sprd_dma_pause_resume(schan, true);
        spin_unlock_irqrestore(&schan->vc.lock, flags);

        return 0;
}

static int sprd_dma_resume(struct dma_chan *chan)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&schan->vc.lock, flags);
        sprd_dma_pause_resume(schan, false);
        spin_unlock_irqrestore(&schan->vc.lock, flags);

        return 0;
}

static int sprd_dma_terminate_all(struct dma_chan *chan)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&schan->vc.lock, flags);
        sprd_dma_stop(schan);

        vchan_get_all_descriptors(&schan->vc, &head);
        spin_unlock_irqrestore(&schan->vc.lock, flags);

        vchan_dma_desc_free_list(&schan->vc, &head);
        return 0;
}

static void sprd_dma_free_desc(struct virt_dma_desc *vd)
{
        struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);

        kfree(sdesc);
}

static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
{
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
        u32 req = *(u32 *)param;

        if (req < sdev->total_chns)
                return req == schan->chn_num + 1;
        else
                return false;
}
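
/*
 * Usage sketch (hypothetical client code): the filter parameter is the
 * 1-based channel number, i.e. the single cell of a consumer's "dmas"
 * property that of_dma_simple_xlate() forwards to this filter. A non-DT
 * client could request the first channel like so:
 *
 *      dma_cap_mask_t mask;
 *      u32 chn = 1;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, sprd_dma_filter_fn, &chn);
 */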

static int sprd_dma_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct sprd_dma_dev *sdev;
        struct sprd_dma_chn *dma_chn;
        struct resource *res;
        u32 chn_count;
        int ret, i;

        ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count);
        if (ret) {
                dev_err(&pdev->dev, "get dma channels count failed\n");
                return ret;
        }

        sdev = devm_kzalloc(&pdev->dev,
                            struct_size(sdev, channels, chn_count),
                            GFP_KERNEL);
        if (!sdev)
                return -ENOMEM;

        sdev->clk = devm_clk_get(&pdev->dev, "enable");
        if (IS_ERR(sdev->clk)) {
                dev_err(&pdev->dev, "get enable clock failed\n");
                return PTR_ERR(sdev->clk);
        }

        /* ashb clock is optional for AGCP DMA */
        sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb");
        if (IS_ERR(sdev->ashb_clk))
                dev_warn(&pdev->dev, "no optional ashb eb clock\n");

        /*
         * There are three DMA controllers: AP DMA, AON DMA and AGCP DMA.
         * The AGCP DMA controller may run without requesting its irq, which
         * keeps DMA interrupts from waking the system and thereby saves
         * power. Thus the DMA interrupts property is optional.
         */
        sdev->irq = platform_get_irq(pdev, 0);
        if (sdev->irq > 0) {
                ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle,
                                       0, "sprd_dma", (void *)sdev);
                if (ret < 0) {
                        dev_err(&pdev->dev, "request dma irq failed\n");
                        return ret;
                }
        } else {
                dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        sdev->glb_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(sdev->glb_base))
                return PTR_ERR(sdev->glb_base);

        dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
        sdev->total_chns = chn_count;
        sdev->dma_dev.chancnt = chn_count;
        INIT_LIST_HEAD(&sdev->dma_dev.channels);
        INIT_LIST_HEAD(&sdev->dma_dev.global_node);
        sdev->dma_dev.dev = &pdev->dev;
        sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources;
        sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources;
        sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
        sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
        sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
        sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg;
        sdev->dma_dev.device_config = sprd_dma_slave_config;
        sdev->dma_dev.device_pause = sprd_dma_pause;
        sdev->dma_dev.device_resume = sprd_dma_resume;
        sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;

        for (i = 0; i < chn_count; i++) {
                dma_chn = &sdev->channels[i];
                dma_chn->chn_num = i;
                dma_chn->cur_desc = NULL;
                /* get each channel's registers base address. */
                dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET +
                                    SPRD_DMA_CHN_REG_LENGTH * i;

                dma_chn->vc.desc_free = sprd_dma_free_desc;
                vchan_init(&dma_chn->vc, &sdev->dma_dev);
        }

        platform_set_drvdata(pdev, sdev);
        ret = sprd_dma_enable(sdev);
        if (ret)
                return ret;

        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
                goto err_rpm;

        ret = dma_async_device_register(&sdev->dma_dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "register dma device failed:%d\n", ret);
                goto err_register;
        }

        sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask;
        ret = of_dma_controller_register(np, of_dma_simple_xlate,
                                         &sprd_dma_info);
        if (ret)
                goto err_of_register;

        pm_runtime_put(&pdev->dev);
        return 0;

err_of_register:
        dma_async_device_unregister(&sdev->dma_dev);
err_register:
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
err_rpm:
        sprd_dma_disable(sdev);
        return ret;
}

static int sprd_dma_remove(struct platform_device *pdev)
{
        struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
        struct sprd_dma_chn *c, *cn;
        int ret;

        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
                return ret;

        /* explicitly free the irq */
        if (sdev->irq > 0)
                devm_free_irq(&pdev->dev, sdev->irq, sdev);

        list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels,
                                 vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
        }

        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&sdev->dma_dev);
        sprd_dma_disable(sdev);

        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return 0;
}

static const struct of_device_id sprd_dma_match[] = {
        { .compatible = "sprd,sc9860-dma", },
        {},
};

static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
{
        struct sprd_dma_dev *sdev = dev_get_drvdata(dev);

        sprd_dma_disable(sdev);
        return 0;
}

static int __maybe_unused sprd_dma_runtime_resume(struct device *dev)
{
        struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
        int ret;

        ret = sprd_dma_enable(sdev);
        if (ret)
                dev_err(sdev->dma_dev.dev, "enable dma failed\n");

        return ret;
}

static const struct dev_pm_ops sprd_dma_pm_ops = {
        SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend,
                           sprd_dma_runtime_resume,
                           NULL)
};

static struct platform_driver sprd_dma_driver = {
        .probe = sprd_dma_probe,
        .remove = sprd_dma_remove,
        .driver = {
                .name = "sprd-dma",
                .of_match_table = sprd_dma_match,
                .pm = &sprd_dma_pm_ops,
        },
};

module_platform_driver(sprd_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DMA driver for Spreadtrum");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
MODULE_ALIAS("platform:sprd-dma");