sprd-dma.c

/*
 * Copyright (C) 2017 Spreadtrum Communications Inc.
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dma/sprd-dma.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"
#define SPRD_DMA_CHN_REG_OFFSET		0x1000
#define SPRD_DMA_CHN_REG_LENGTH		0x40
#define SPRD_DMA_MEMCPY_MIN_SIZE	64

/* DMA global registers definition */
#define SPRD_DMA_GLB_PAUSE		0x0
#define SPRD_DMA_GLB_FRAG_WAIT		0x4
#define SPRD_DMA_GLB_REQ_PEND0_EN	0x8
#define SPRD_DMA_GLB_REQ_PEND1_EN	0xc
#define SPRD_DMA_GLB_INT_RAW_STS	0x10
#define SPRD_DMA_GLB_INT_MSK_STS	0x14
#define SPRD_DMA_GLB_REQ_STS		0x18
#define SPRD_DMA_GLB_CHN_EN_STS		0x1c
#define SPRD_DMA_GLB_DEBUG_STS		0x20
#define SPRD_DMA_GLB_ARB_SEL_STS	0x24
#define SPRD_DMA_GLB_REQ_UID(uid)	(0x4 * ((uid) - 1))
#define SPRD_DMA_GLB_REQ_UID_OFFSET	0x2000

/* DMA channel registers definition */
#define SPRD_DMA_CHN_PAUSE		0x0
#define SPRD_DMA_CHN_REQ		0x4
#define SPRD_DMA_CHN_CFG		0x8
#define SPRD_DMA_CHN_INTC		0xc
#define SPRD_DMA_CHN_SRC_ADDR		0x10
#define SPRD_DMA_CHN_DES_ADDR		0x14
#define SPRD_DMA_CHN_FRG_LEN		0x18
#define SPRD_DMA_CHN_BLK_LEN		0x1c
#define SPRD_DMA_CHN_TRSC_LEN		0x20
#define SPRD_DMA_CHN_TRSF_STEP		0x24
#define SPRD_DMA_CHN_WARP_PTR		0x28
#define SPRD_DMA_CHN_WARP_TO		0x2c
#define SPRD_DMA_CHN_LLIST_PTR		0x30
#define SPRD_DMA_CHN_FRAG_STEP		0x34
#define SPRD_DMA_CHN_SRC_BLK_STEP	0x38
#define SPRD_DMA_CHN_DES_BLK_STEP	0x3c

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_INT_MASK		GENMASK(4, 0)
#define SPRD_DMA_INT_CLR_OFFSET		24
#define SPRD_DMA_FRAG_INT_EN		BIT(0)
#define SPRD_DMA_BLK_INT_EN		BIT(1)
#define SPRD_DMA_TRANS_INT_EN		BIT(2)
#define SPRD_DMA_LIST_INT_EN		BIT(3)
#define SPRD_DMA_CFG_ERR_INT_EN		BIT(4)

/* SPRD_DMA_CHN_CFG register definition */
#define SPRD_DMA_CHN_EN			BIT(0)
#define SPRD_DMA_WAIT_BDONE_OFFSET	24
#define SPRD_DMA_DONOT_WAIT_BDONE	1

/* SPRD_DMA_CHN_REQ register definition */
#define SPRD_DMA_REQ_EN			BIT(0)

/* SPRD_DMA_CHN_PAUSE register definition */
#define SPRD_DMA_PAUSE_EN		BIT(0)
#define SPRD_DMA_PAUSE_STS		BIT(2)
#define SPRD_DMA_PAUSE_CNT		0x2000

/* DMA_CHN_WARP_* register definition */
#define SPRD_DMA_HIGH_ADDR_MASK		GENMASK(31, 28)
#define SPRD_DMA_LOW_ADDR_MASK		GENMASK(31, 0)
#define SPRD_DMA_HIGH_ADDR_OFFSET	4

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_FRAG_INT_STS		BIT(16)
#define SPRD_DMA_BLK_INT_STS		BIT(17)
#define SPRD_DMA_TRSC_INT_STS		BIT(18)
#define SPRD_DMA_LIST_INT_STS		BIT(19)
#define SPRD_DMA_CFGERR_INT_STS		BIT(20)
#define SPRD_DMA_CHN_INT_STS					\
	(SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS |		\
	 SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS |	\
	 SPRD_DMA_CFGERR_INT_STS)

/* SPRD_DMA_CHN_FRG_LEN register definition */
#define SPRD_DMA_SRC_DATAWIDTH_OFFSET	30
#define SPRD_DMA_DES_DATAWIDTH_OFFSET	28
#define SPRD_DMA_SWT_MODE_OFFSET	26
#define SPRD_DMA_REQ_MODE_OFFSET	24
#define SPRD_DMA_REQ_MODE_MASK		GENMASK(1, 0)
#define SPRD_DMA_FIX_SEL_OFFSET		21
#define SPRD_DMA_FIX_EN_OFFSET		20
#define SPRD_DMA_LLIST_END_OFFSET	19
#define SPRD_DMA_FRG_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_BLK_LEN register definition */
#define SPRD_DMA_BLK_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_TRSC_LEN register definition */
#define SPRD_DMA_TRSC_LEN_MASK		GENMASK(27, 0)

/* SPRD_DMA_CHN_TRSF_STEP register definition */
#define SPRD_DMA_DEST_TRSF_STEP_OFFSET	16
#define SPRD_DMA_SRC_TRSF_STEP_OFFSET	0
#define SPRD_DMA_TRSF_STEP_MASK		GENMASK(15, 0)

/* define the DMA transfer step type */
#define SPRD_DMA_NONE_STEP		0
#define SPRD_DMA_BYTE_STEP		1
#define SPRD_DMA_SHORT_STEP		2
#define SPRD_DMA_WORD_STEP		4
#define SPRD_DMA_DWORD_STEP		8

#define SPRD_DMA_SOFTWARE_UID		0
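
/* UID 0 means the channel is triggered by software instead of a hardware request line. */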

/* dma data width values */
enum sprd_dma_datawidth {
	SPRD_DMA_DATAWIDTH_1_BYTE,
	SPRD_DMA_DATAWIDTH_2_BYTES,
	SPRD_DMA_DATAWIDTH_4_BYTES,
	SPRD_DMA_DATAWIDTH_8_BYTES,
};

/* dma channel hardware configuration */
struct sprd_dma_chn_hw {
	u32 pause;
	u32 req;
	u32 cfg;
	u32 intc;
	u32 src_addr;
	u32 des_addr;
	u32 frg_len;
	u32 blk_len;
	u32 trsc_len;
	u32 trsf_step;
	u32 wrap_ptr;
	u32 wrap_to;
	u32 llist_ptr;
	u32 frg_step;
	u32 src_blk_step;
	u32 des_blk_step;
};

/* dma request description */
struct sprd_dma_desc {
	struct virt_dma_desc	vd;
	struct sprd_dma_chn_hw	chn_hw;
};

/* dma channel description */
struct sprd_dma_chn {
	struct virt_dma_chan	vc;
	void __iomem		*chn_base;
	struct dma_slave_config	slave_cfg;
	u32			chn_num;
	u32			dev_id;
	struct sprd_dma_desc	*cur_desc;
};

/* SPRD dma device */
struct sprd_dma_dev {
	struct dma_device	dma_dev;
	void __iomem		*glb_base;
	struct clk		*clk;
	struct clk		*ashb_clk;
	int			irq;
	u32			total_chns;
	struct sprd_dma_chn	channels[];
};

static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info sprd_dma_info = {
	.filter_fn = sprd_dma_filter_fn,
};

static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sprd_dma_chn, vc.chan);
}

static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(c);

	return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]);
}

static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sprd_dma_desc, vd);
}
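
/*
 * Read-modify-write helper: clear the bits in @mask and set @val in the
 * channel register at offset @reg. Note that @val is expected to be
 * pre-shifted into the @mask position.
 */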
static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg,
				u32 mask, u32 val)
{
	u32 orig = readl(schan->chn_base + reg);
	u32 tmp;

	tmp = (orig & ~mask) | val;
	writel(tmp, schan->chn_base + reg);
}

static int sprd_dma_enable(struct sprd_dma_dev *sdev)
{
	int ret;

	ret = clk_prepare_enable(sdev->clk);
	if (ret)
		return ret;

	/*
	 * The ashb_clk is optional and only required by the AGCP DMA
	 * controller, so check whether it is present before enabling it.
	 */
	if (!IS_ERR(sdev->ashb_clk))
		ret = clk_prepare_enable(sdev->ashb_clk);

	return ret;
}

static void sprd_dma_disable(struct sprd_dma_dev *sdev)
{
	clk_disable_unprepare(sdev->clk);

	/* The optional ashb_clk for AGCP DMA only needs disabling if present. */
	if (!IS_ERR(sdev->ashb_clk))
		clk_disable_unprepare(sdev->ashb_clk);
}
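
/*
 * Each hardware request line is identified by a user ID (UID). Routing a
 * request to a channel is done by writing the channel number plus one into
 * that UID's slot in the global UID registers; writing zero unroutes it.
 */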
static void sprd_dma_set_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(schan->chn_num + 1, sdev->glb_base + uid_offset);
	}
}

static void sprd_dma_unset_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(0, sdev->glb_base + uid_offset);
	}
}

static void sprd_dma_clear_int(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET);
}

static void sprd_dma_enable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN,
			    SPRD_DMA_CHN_EN);
}

static void sprd_dma_disable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0);
}

static void sprd_dma_soft_request(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN,
			    SPRD_DMA_REQ_EN);
}

static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 pause, timeout = SPRD_DMA_PAUSE_CNT;

	if (enable) {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN);

		do {
			pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE);
			if (pause & SPRD_DMA_PAUSE_STS)
				break;

			cpu_relax();
		} while (--timeout > 0);

		if (!timeout)
			dev_warn(sdev->dma_dev.dev,
				 "pause dma controller timeout\n");
	} else {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, 0);
	}
}

static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan)
{
	u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG);

	if (!(cfg & SPRD_DMA_CHN_EN))
		return;

	sprd_dma_pause_resume(schan, true);
	sprd_dma_disable_chn(schan);
}
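
/*
 * The controller supports 36-bit addresses: the low 32 bits live in the
 * SRC/DES address registers, while the high 4 bits are kept in the top
 * nibble of the WARP_PTR (source) and WARP_TO (destination) registers.
 */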
static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan)
{
	unsigned long addr, addr_high;

	addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) &
		    SPRD_DMA_HIGH_ADDR_MASK;

	return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}

static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) &
		       SPRD_DMA_CHN_INT_STS;

	switch (intc_sts) {
	case SPRD_DMA_CFGERR_INT_STS:
		return SPRD_DMA_CFGERR_INT;

	case SPRD_DMA_LIST_INT_STS:
		return SPRD_DMA_LIST_INT;

	case SPRD_DMA_TRSC_INT_STS:
		return SPRD_DMA_TRANS_INT;

	case SPRD_DMA_BLK_INT_STS:
		return SPRD_DMA_BLK_INT;

	case SPRD_DMA_FRAG_INT_STS:
		return SPRD_DMA_FRAG_INT;

	default:
		dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n");
		return SPRD_DMA_NO_INT;
	}
}

static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan)
{
	u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN);

	return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK;
}

static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
				    struct sprd_dma_desc *sdesc)
{
	struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw;

	writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE);
	writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG);
	writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC);
	writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
	writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN);
	writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN);
	writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP);
	writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR);
	writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO);
	writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR);
	writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP);
	writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP);
	writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP);
	writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ);
}

static void sprd_dma_start(struct sprd_dma_chn *schan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&schan->vc);

	if (!vd)
		return;

	list_del(&vd->node);
	schan->cur_desc = to_sprd_dma_desc(vd);

	/*
	 * Copy the DMA configuration from DMA descriptor to this hardware
	 * channel.
	 */
	sprd_dma_set_chn_config(schan, schan->cur_desc);
	sprd_dma_set_uid(schan);
	sprd_dma_enable_chn(schan);

	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
		sprd_dma_soft_request(schan);
}

static void sprd_dma_stop(struct sprd_dma_chn *schan)
{
	sprd_dma_stop_and_disable(schan);
	sprd_dma_unset_uid(schan);
	sprd_dma_clear_int(schan);
}
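
/*
 * The interrupt and request-mode enums are ordered from the smallest
 * transfer unit (fragment) up to the full transfer, so a descriptor is
 * considered done once the reported interrupt type reaches the level
 * just above the configured request mode.
 */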
static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc,
				      enum sprd_dma_int_type int_type,
				      enum sprd_dma_req_mode req_mode)
{
	if (int_type == SPRD_DMA_NO_INT)
		return false;

	return int_type >= req_mode + 1;
}

static irqreturn_t dma_irq_handle(int irq, void *dev_id)
{
	struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id;
	u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS);
	struct sprd_dma_chn *schan;
	struct sprd_dma_desc *sdesc;
	enum sprd_dma_req_mode req_type;
	enum sprd_dma_int_type int_type;
	bool trans_done = false;
	u32 i;

	while (irq_status) {
		i = __ffs(irq_status);
		irq_status &= (irq_status - 1);
		schan = &sdev->channels[i];

		spin_lock(&schan->vc.lock);
		int_type = sprd_dma_get_int_type(schan);
		req_type = sprd_dma_get_req_type(schan);
		sprd_dma_clear_int(schan);

		sdesc = schan->cur_desc;

		/* Check if the dma request descriptor is done. */
		trans_done = sprd_dma_check_trans_done(sdesc, int_type,
						       req_type);
		if (trans_done) {
			vchan_cookie_complete(&sdesc->vd);
			schan->cur_desc = NULL;
			sprd_dma_start(schan);
		}
		spin_unlock(&schan->vc.lock);
	}

	return IRQ_HANDLED;
}
static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	int ret;

	ret = pm_runtime_get_sync(chan->device->dev);
	if (ret < 0)
		return ret;

	schan->dev_id = SPRD_DMA_SOFTWARE_UID;
	return 0;
}

static void sprd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_stop(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	vchan_free_chan_resources(&schan->vc);
	pm_runtime_put(chan->device->dev);
}

static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 pos;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&schan->vc.lock, flags);
	vd = vchan_find_desc(&schan->vc, cookie);
	if (vd) {
		struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
		struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;

		if (hw->trsc_len > 0)
			pos = hw->trsc_len;
		else if (hw->blk_len > 0)
			pos = hw->blk_len;
		else if (hw->frg_len > 0)
			pos = hw->frg_len;
		else
			pos = 0;
	} else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
		pos = sprd_dma_get_dst_addr(schan);
	} else {
		pos = 0;
	}
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	dma_set_residue(txstate, pos);
	return ret;
}

static void sprd_dma_issue_pending(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	if (vchan_issue_pending(&schan->vc) && !schan->cur_desc)
		sprd_dma_start(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);
}
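
/*
 * The hardware encodes the bus width as log2 of the byte count
 * (1/2/4/8 bytes -> 0/1/2/3), which is what the ffs() expression below
 * computes; the address step is simply the width in bytes.
 */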
static int sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth)
{
	switch (buswidth) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return ffs(buswidth) - 1;

	default:
		return -EINVAL;
	}
}

static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
{
	switch (buswidth) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return buswidth;

	default:
		return -EINVAL;
	}
}
static int sprd_dma_fill_desc(struct dma_chan *chan,
			      struct sprd_dma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, u32 len,
			      enum dma_transfer_direction dir,
			      unsigned long flags,
			      struct dma_slave_config *slave_cfg)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
	u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
	u32 int_mode = flags & SPRD_DMA_INT_MASK;
	int src_datawidth, dst_datawidth, src_step, dst_step;
	u32 temp, fix_mode = 0, fix_en = 0;

	if (dir == DMA_MEM_TO_DEV) {
		src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
		if (src_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid source step\n");
			return src_step;
		}
		dst_step = SPRD_DMA_NONE_STEP;
	} else {
		dst_step = sprd_dma_get_step(slave_cfg->dst_addr_width);
		if (dst_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid destination step\n");
			return dst_step;
		}
		src_step = SPRD_DMA_NONE_STEP;
	}

	src_datawidth = sprd_dma_get_datawidth(slave_cfg->src_addr_width);
	if (src_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid source datawidth\n");
		return src_datawidth;
	}

	dst_datawidth = sprd_dma_get_datawidth(slave_cfg->dst_addr_width);
	if (dst_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid destination datawidth\n");
		return dst_datawidth;
	}

	if (slave_cfg->slave_id)
		schan->dev_id = slave_cfg->slave_id;

	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;

	/*
	 * wrap_ptr and wrap_to hold the high 4 bits of the source and
	 * destination addresses respectively.
	 */
	hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
	hw->wrap_to = (dst >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
	hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
	hw->des_addr = dst & SPRD_DMA_LOW_ADDR_MASK;

	/*
	 * Fix mode keeps one side's address constant, so it can only be
	 * enabled when exactly one of the source and destination steps is
	 * zero. If both steps are zero, or both are non-zero, fix mode
	 * stays disabled.
	 */
	if ((src_step != 0 && dst_step != 0) || (src_step | dst_step) == 0) {
		fix_en = 0;
	} else {
		fix_en = 1;
		if (src_step)
			fix_mode = 1;
		else
			fix_mode = 0;
	}

	hw->intc = int_mode | SPRD_DMA_CFG_ERR_INT_EN;

	temp = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
	temp |= dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
	temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET;
	temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET;
	temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET;
	temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
	hw->frg_len = temp;

	hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;

	temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
	temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
	hw->trsf_step = temp;

	hw->frg_step = 0;
	hw->src_blk_step = 0;
	hw->des_blk_step = 0;
	return 0;
}
static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_desc *sdesc;
	struct sprd_dma_chn_hw *hw;
	enum sprd_dma_datawidth datawidth;
	u32 step, temp;

	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
	if (!sdesc)
		return NULL;

	hw = &sdesc->chn_hw;

	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
	hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN;
	hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
	hw->des_addr = dest & SPRD_DMA_LOW_ADDR_MASK;
	hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
		       SPRD_DMA_HIGH_ADDR_MASK;
	hw->wrap_to = (dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
		      SPRD_DMA_HIGH_ADDR_MASK;

	if (IS_ALIGNED(len, 8)) {
		datawidth = SPRD_DMA_DATAWIDTH_8_BYTES;
		step = SPRD_DMA_DWORD_STEP;
	} else if (IS_ALIGNED(len, 4)) {
		datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
		step = SPRD_DMA_WORD_STEP;
	} else if (IS_ALIGNED(len, 2)) {
		datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
		step = SPRD_DMA_SHORT_STEP;
	} else {
		datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
		step = SPRD_DMA_BYTE_STEP;
	}

	temp = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
	temp |= datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
	temp |= SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET;
	temp |= len & SPRD_DMA_FRG_LEN_MASK;
	hw->frg_len = temp;

	hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;

	temp = (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
	temp |= (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
	hw->trsf_step = temp;

	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}
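
/*
 * Illustrative sketch (not part of this driver) of how a client could
 * drive the memcpy path above through the generic dmaengine API;
 * dst_dma, src_dma and len are placeholders for caller-provided DMA
 * addresses and length:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);	// any DMA_MEMCPY channel
 *	if (!chan)
 *		return -ENODEV;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);		// queue the descriptor
 *	dma_async_issue_pending(chan);		// ends up in sprd_dma_issue_pending()
 *	dma_sync_wait(chan, cookie);		// poll for completion
 *	dma_release_channel(chan);
 */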
static struct dma_async_tx_descriptor *
sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sglen, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct dma_slave_config *slave_cfg = &schan->slave_cfg;
	dma_addr_t src = 0, dst = 0;
	struct sprd_dma_desc *sdesc;
	struct scatterlist *sg;
	u32 len = 0;
	int ret, i;

	/* TODO: now we only support one sg for each DMA configuration. */
	if (!is_slave_direction(dir) || sglen > 1)
		return NULL;

	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
	if (!sdesc)
		return NULL;

	for_each_sg(sgl, sg, sglen, i) {
		len = sg_dma_len(sg);
		if (dir == DMA_MEM_TO_DEV) {
			src = sg_dma_address(sg);
			dst = slave_cfg->dst_addr;
		} else {
			src = slave_cfg->src_addr;
			dst = sg_dma_address(sg);
		}
	}

	ret = sprd_dma_fill_desc(chan, sdesc, src, dst, len, dir, flags,
				 slave_cfg);
	if (ret) {
		kfree(sdesc);
		return NULL;
	}

	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}
static int sprd_dma_slave_config(struct dma_chan *chan,
				 struct dma_slave_config *config)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct dma_slave_config *slave_cfg = &schan->slave_cfg;

	if (!is_slave_direction(config->direction))
		return -EINVAL;

	memcpy(slave_cfg, config, sizeof(*config));
	return 0;
}
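
/*
 * Illustrative sketch (not part of this driver): a peripheral driver would
 * configure the channel and then prepare a single-entry scatterlist,
 * matching the one-sg limitation above. fifo_dma_addr and sgl are assumed
 * to be set up by the caller, and the flags word is assumed here to pack
 * the request mode and interrupt enable bits the way sprd_dma_fill_desc()
 * unpacks them:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_dma_addr,	// device FIFO address
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 4,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);	// lands in sprd_dma_slave_config()
 *	tx = dmaengine_prep_slave_sg(chan, sgl, 1, DMA_MEM_TO_DEV,
 *				     (SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_SHIFT) |
 *				     SPRD_DMA_TRANS_INT_EN);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */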
static int sprd_dma_pause(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, true);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_resume(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, false);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_terminate_all(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_stop(schan);

	vchan_get_all_descriptors(&schan->vc, &head);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	vchan_dma_desc_free_list(&schan->vc, &head);
	return 0;
}
static void sprd_dma_free_desc(struct virt_dma_desc *vd)
{
	struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);

	kfree(sdesc);
}

static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 req = *(u32 *)param;

	if (req < sdev->total_chns)
		return req == schan->chn_num + 1;
	else
		return false;
}
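
/*
 * The filter matches the consumer's DT cell (a 1-based channel number,
 * passed through of_dma_simple_xlate()) against the channel, so a
 * hypothetical consumer node like the one below ends up on the intended
 * channel; node names and cell values here are illustrative only:
 *
 *	dmas = <&ap_dma 11>;		// request channel 11
 *	dma-names = "rx";
 */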
static int sprd_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct sprd_dma_dev *sdev;
	struct sprd_dma_chn *dma_chn;
	struct resource *res;
	u32 chn_count;
	int ret, i;

	ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count);
	if (ret) {
		dev_err(&pdev->dev, "get dma channels count failed\n");
		return ret;
	}

	sdev = devm_kzalloc(&pdev->dev,
			    struct_size(sdev, channels, chn_count),
			    GFP_KERNEL);
	if (!sdev)
		return -ENOMEM;

	sdev->clk = devm_clk_get(&pdev->dev, "enable");
	if (IS_ERR(sdev->clk)) {
		dev_err(&pdev->dev, "get enable clock failed\n");
		return PTR_ERR(sdev->clk);
	}

	/* ashb clock is optional for AGCP DMA */
	sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb");
	if (IS_ERR(sdev->ashb_clk))
		dev_warn(&pdev->dev, "no optional ashb eb clock\n");

	/*
	 * We have three DMA controllers: AP DMA, AON DMA and AGCP DMA. The
	 * AGCP DMA controller may omit its interrupt line, which saves power
	 * by not waking the system on DMA interrupts, so the interrupts
	 * property is optional.
	 */
	sdev->irq = platform_get_irq(pdev, 0);
	if (sdev->irq > 0) {
		ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle,
				       0, "sprd_dma", (void *)sdev);
		if (ret < 0) {
			dev_err(&pdev->dev, "request dma irq failed\n");
			return ret;
		}
	} else {
		dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdev->glb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(sdev->glb_base))
		return PTR_ERR(sdev->glb_base);

	dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
	sdev->total_chns = chn_count;
	sdev->dma_dev.chancnt = chn_count;
	INIT_LIST_HEAD(&sdev->dma_dev.channels);
	INIT_LIST_HEAD(&sdev->dma_dev.global_node);
	sdev->dma_dev.dev = &pdev->dev;
	sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources;
	sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources;
	sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
	sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
	sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
	sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg;
	sdev->dma_dev.device_config = sprd_dma_slave_config;
	sdev->dma_dev.device_pause = sprd_dma_pause;
	sdev->dma_dev.device_resume = sprd_dma_resume;
	sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;

	for (i = 0; i < chn_count; i++) {
		dma_chn = &sdev->channels[i];
		dma_chn->chn_num = i;
		dma_chn->cur_desc = NULL;
		/* get each channel's registers base address. */
		dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET +
				    SPRD_DMA_CHN_REG_LENGTH * i;

		dma_chn->vc.desc_free = sprd_dma_free_desc;
		vchan_init(&dma_chn->vc, &sdev->dma_dev);
	}

	platform_set_drvdata(pdev, sdev);
	ret = sprd_dma_enable(sdev);
	if (ret)
		return ret;

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto err_rpm;

	ret = dma_async_device_register(&sdev->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "register dma device failed:%d\n", ret);
		goto err_register;
	}

	sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask;
	ret = of_dma_controller_register(np, of_dma_simple_xlate,
					 &sprd_dma_info);
	if (ret)
		goto err_of_register;

	pm_runtime_put(&pdev->dev);
	return 0;

err_of_register:
	dma_async_device_unregister(&sdev->dma_dev);
err_register:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_rpm:
	sprd_dma_disable(sdev);
	return ret;
}
static int sprd_dma_remove(struct platform_device *pdev)
{
	struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
	struct sprd_dma_chn *c, *cn;
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		return ret;

	/* explicitly free the irq */
	if (sdev->irq > 0)
		devm_free_irq(&pdev->dev, sdev->irq, sdev);

	list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdev->dma_dev);
	sprd_dma_disable(sdev);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
static const struct of_device_id sprd_dma_match[] = {
	{ .compatible = "sprd,sc9860-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, sprd_dma_match);

static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);

	sprd_dma_disable(sdev);
	return 0;
}

static int __maybe_unused sprd_dma_runtime_resume(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
	int ret;

	ret = sprd_dma_enable(sdev);
	if (ret)
		dev_err(sdev->dma_dev.dev, "enable dma failed\n");

	return ret;
}

static const struct dev_pm_ops sprd_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend,
			   sprd_dma_runtime_resume,
			   NULL)
};

static struct platform_driver sprd_dma_driver = {
	.probe = sprd_dma_probe,
	.remove = sprd_dma_remove,
	.driver = {
		.name = "sprd-dma",
		.of_match_table = sprd_dma_match,
		.pm = &sprd_dma_pm_ops,
	},
};
module_platform_driver(sprd_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DMA driver for Spreadtrum");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
MODULE_ALIAS("platform:sprd-dma");