
/*
 * Copyright (C) 2017 Spreadtrum Communications Inc.
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define SPRD_DMA_CHN_REG_OFFSET		0x1000
#define SPRD_DMA_CHN_REG_LENGTH		0x40
#define SPRD_DMA_MEMCPY_MIN_SIZE	64

/* DMA global registers definition */
#define SPRD_DMA_GLB_PAUSE		0x0
#define SPRD_DMA_GLB_FRAG_WAIT		0x4
#define SPRD_DMA_GLB_REQ_PEND0_EN	0x8
#define SPRD_DMA_GLB_REQ_PEND1_EN	0xc
#define SPRD_DMA_GLB_INT_RAW_STS	0x10
#define SPRD_DMA_GLB_INT_MSK_STS	0x14
#define SPRD_DMA_GLB_REQ_STS		0x18
#define SPRD_DMA_GLB_CHN_EN_STS		0x1c
#define SPRD_DMA_GLB_DEBUG_STS		0x20
#define SPRD_DMA_GLB_ARB_SEL_STS	0x24
#define SPRD_DMA_GLB_REQ_UID(uid)	(0x4 * ((uid) - 1))
#define SPRD_DMA_GLB_REQ_UID_OFFSET	0x2000

/* DMA channel registers definition */
#define SPRD_DMA_CHN_PAUSE		0x0
#define SPRD_DMA_CHN_REQ		0x4
#define SPRD_DMA_CHN_CFG		0x8
#define SPRD_DMA_CHN_INTC		0xc
#define SPRD_DMA_CHN_SRC_ADDR		0x10
#define SPRD_DMA_CHN_DES_ADDR		0x14
#define SPRD_DMA_CHN_FRG_LEN		0x18
#define SPRD_DMA_CHN_BLK_LEN		0x1c
#define SPRD_DMA_CHN_TRSC_LEN		0x20
#define SPRD_DMA_CHN_TRSF_STEP		0x24
#define SPRD_DMA_CHN_WARP_PTR		0x28
#define SPRD_DMA_CHN_WARP_TO		0x2c
#define SPRD_DMA_CHN_LLIST_PTR		0x30
#define SPRD_DMA_CHN_FRAG_STEP		0x34
#define SPRD_DMA_CHN_SRC_BLK_STEP	0x38
#define SPRD_DMA_CHN_DES_BLK_STEP	0x3c
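
/*
 * Each channel owns a 0x40-byte register window starting at
 * glb_base + SPRD_DMA_CHN_REG_OFFSET, so channel i is accessed at
 * glb_base + 0x1000 + 0x40 * i (see the chn_base setup in probe below).
 */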

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_INT_MASK		GENMASK(4, 0)
#define SPRD_DMA_INT_CLR_OFFSET		24
#define SPRD_DMA_FRAG_INT_EN		BIT(0)
#define SPRD_DMA_BLK_INT_EN		BIT(1)
#define SPRD_DMA_TRANS_INT_EN		BIT(2)
#define SPRD_DMA_LIST_INT_EN		BIT(3)
#define SPRD_DMA_CFG_ERR_INT_EN		BIT(4)

/* SPRD_DMA_CHN_CFG register definition */
#define SPRD_DMA_CHN_EN			BIT(0)
#define SPRD_DMA_WAIT_BDONE_OFFSET	24
#define SPRD_DMA_DONOT_WAIT_BDONE	1

/* SPRD_DMA_CHN_REQ register definition */
#define SPRD_DMA_REQ_EN			BIT(0)

/* SPRD_DMA_CHN_PAUSE register definition */
#define SPRD_DMA_PAUSE_EN		BIT(0)
#define SPRD_DMA_PAUSE_STS		BIT(2)
#define SPRD_DMA_PAUSE_CNT		0x2000

/* DMA_CHN_WARP_* register definition */
#define SPRD_DMA_HIGH_ADDR_MASK		GENMASK(31, 28)
#define SPRD_DMA_LOW_ADDR_MASK		GENMASK(31, 0)
#define SPRD_DMA_HIGH_ADDR_OFFSET	4

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_FRAG_INT_STS		BIT(16)
#define SPRD_DMA_BLK_INT_STS		BIT(17)
#define SPRD_DMA_TRSC_INT_STS		BIT(18)
#define SPRD_DMA_LIST_INT_STS		BIT(19)
#define SPRD_DMA_CFGERR_INT_STS		BIT(20)
#define SPRD_DMA_CHN_INT_STS					\
	(SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS |		\
	 SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS |	\
	 SPRD_DMA_CFGERR_INT_STS)

/* SPRD_DMA_CHN_FRG_LEN register definition */
#define SPRD_DMA_SRC_DATAWIDTH_OFFSET	30
#define SPRD_DMA_DES_DATAWIDTH_OFFSET	28
#define SPRD_DMA_SWT_MODE_OFFSET	26
#define SPRD_DMA_REQ_MODE_OFFSET	24
#define SPRD_DMA_REQ_MODE_MASK		GENMASK(1, 0)
#define SPRD_DMA_FIX_SEL_OFFSET		21
#define SPRD_DMA_FIX_EN_OFFSET		20
#define SPRD_DMA_LLIST_END_OFFSET	19
#define SPRD_DMA_FRG_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_BLK_LEN register definition */
#define SPRD_DMA_BLK_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_TRSC_LEN register definition */
#define SPRD_DMA_TRSC_LEN_MASK		GENMASK(27, 0)

/* SPRD_DMA_CHN_TRSF_STEP register definition */
#define SPRD_DMA_DEST_TRSF_STEP_OFFSET	16
#define SPRD_DMA_SRC_TRSF_STEP_OFFSET	0
#define SPRD_DMA_TRSF_STEP_MASK		GENMASK(15, 0)

#define SPRD_DMA_SOFTWARE_UID		0

/*
 * enum sprd_dma_req_mode: define the DMA request mode
 * @SPRD_DMA_FRAG_REQ: fragment request mode
 * @SPRD_DMA_BLK_REQ: block request mode
 * @SPRD_DMA_TRANS_REQ: transaction request mode
 * @SPRD_DMA_LIST_REQ: link-list request mode
 *
 * We have four request modes: fragment mode, block mode, transaction mode
 * and link-list mode. One transaction can contain several blocks, and one
 * block can contain several fragments. Link-list mode means several DMA
 * configurations can be saved in one reserved memory area, from which the
 * DMA controller fetches each configuration automatically to start the
 * transfer.
 */
enum sprd_dma_req_mode {
	SPRD_DMA_FRAG_REQ,
	SPRD_DMA_BLK_REQ,
	SPRD_DMA_TRANS_REQ,
	SPRD_DMA_LIST_REQ,
};
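
/*
 * Illustration of the request granularity hierarchy (an assumed example,
 * not taken from a hardware manual): a 192-byte transaction could be split
 * into three 64-byte blocks, each of which is in turn moved as a series of
 * fragments of SPRD_DMA_MEMCPY_MIN_SIZE bytes, matching how
 * sprd_dma_config() below sizes memcpy transfers.
 */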

/*
 * enum sprd_dma_int_type: define the DMA interrupt type
 * @SPRD_DMA_NO_INT: do not need to generate DMA interrupts.
 * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request
 * is done.
 * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done.
 * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment
 * or one block request is done.
 * @SPRD_DMA_TRANS_INT: transaction done interrupt when one transaction
 * request is done.
 * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one
 * transaction request or fragment request is done.
 * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one
 * transaction request or block request is done.
 * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request
 * is done.
 * @SPRD_DMA_CFGERR_INT: configure error interrupt when the configuration is
 * incorrect.
 */
enum sprd_dma_int_type {
	SPRD_DMA_NO_INT,
	SPRD_DMA_FRAG_INT,
	SPRD_DMA_BLK_INT,
	SPRD_DMA_BLK_FRAG_INT,
	SPRD_DMA_TRANS_INT,
	SPRD_DMA_TRANS_FRAG_INT,
	SPRD_DMA_TRANS_BLK_INT,
	SPRD_DMA_LIST_INT,
	SPRD_DMA_CFGERR_INT,
};
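
/*
 * Note that these enumerators are ordered so that a larger interrupt type
 * implies a coarser completion boundary; sprd_dma_check_trans_done() relies
 * on this ordering when it compares an interrupt type against a request
 * mode to decide whether the whole descriptor has finished.
 */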

/* dma channel hardware configuration */
struct sprd_dma_chn_hw {
	u32 pause;
	u32 req;
	u32 cfg;
	u32 intc;
	u32 src_addr;
	u32 des_addr;
	u32 frg_len;
	u32 blk_len;
	u32 trsc_len;
	u32 trsf_step;
	u32 wrap_ptr;
	u32 wrap_to;
	u32 llist_ptr;
	u32 frg_step;
	u32 src_blk_step;
	u32 des_blk_step;
};

/* dma request description */
struct sprd_dma_desc {
	struct virt_dma_desc vd;
	struct sprd_dma_chn_hw chn_hw;
};

/* dma channel description */
struct sprd_dma_chn {
	struct virt_dma_chan vc;
	void __iomem *chn_base;
	u32 chn_num;
	u32 dev_id;
	struct sprd_dma_desc *cur_desc;
};

/* SPRD dma device */
struct sprd_dma_dev {
	struct dma_device dma_dev;
	void __iomem *glb_base;
	struct clk *clk;
	struct clk *ashb_clk;
	int irq;
	u32 total_chns;
	struct sprd_dma_chn channels[];
};
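
/*
 * The trailing flexible array member is why probe allocates the device with
 * struct_size(sdev, channels, chn_count): one sprd_dma_chn per hardware
 * channel is laid out in the same allocation as the device structure.
 */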

static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info sprd_dma_info = {
	.filter_fn = sprd_dma_filter_fn,
};

static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sprd_dma_chn, vc.chan);
}

static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(c);

	return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]);
}

static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sprd_dma_desc, vd);
}

static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg,
				u32 mask, u32 val)
{
	u32 orig = readl(schan->chn_base + reg);
	u32 tmp;

	tmp = (orig & ~mask) | val;
	writel(tmp, schan->chn_base + reg);
}
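
/*
 * Note for callers of sprd_dma_chn_update(): @val is OR-ed in as-is, so it
 * must already be shifted into the bit positions selected by @mask, as the
 * interrupt-clear and pause helpers below do.
 */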

static int sprd_dma_enable(struct sprd_dma_dev *sdev)
{
	int ret;

	ret = clk_prepare_enable(sdev->clk);
	if (ret)
		return ret;

	/*
	 * The ashb_clk is optional and only used by the AGCP DMA controller,
	 * so check whether it is present before enabling it. Roll back the
	 * main clock if enabling the ashb_clk fails.
	 */
	if (!IS_ERR(sdev->ashb_clk)) {
		ret = clk_prepare_enable(sdev->ashb_clk);
		if (ret)
			clk_disable_unprepare(sdev->clk);
	}

	return ret;
}

static void sprd_dma_disable(struct sprd_dma_dev *sdev)
{
	clk_disable_unprepare(sdev->clk);

	/* Disable the optional ashb_clk for the AGCP DMA if it is present. */
	if (!IS_ERR(sdev->ashb_clk))
		clk_disable_unprepare(sdev->ashb_clk);
}

static void sprd_dma_set_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(schan->chn_num + 1, sdev->glb_base + uid_offset);
	}
}

static void sprd_dma_unset_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(0, sdev->glb_base + uid_offset);
	}
}
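
/*
 * UID routing in short: each hardware request line (UID) has a slot in the
 * global register area; writing chn_num + 1 there binds that request line
 * to this channel, and writing 0 releases the binding. UID 0 is reserved
 * for pure software (memcpy) requests, which are kicked off through
 * SPRD_DMA_CHN_REQ instead.
 */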

static void sprd_dma_clear_int(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET);
}

static void sprd_dma_enable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN,
			    SPRD_DMA_CHN_EN);
}

static void sprd_dma_disable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0);
}

static void sprd_dma_soft_request(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN,
			    SPRD_DMA_REQ_EN);
}

static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 pause, timeout = SPRD_DMA_PAUSE_CNT;

	if (enable) {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN);

		do {
			pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE);
			if (pause & SPRD_DMA_PAUSE_STS)
				break;

			cpu_relax();
		} while (--timeout > 0);

		if (!timeout)
			dev_warn(sdev->dma_dev.dev,
				 "pause dma controller timeout\n");
	} else {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, 0);
	}
}

static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan)
{
	u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG);

	if (!(cfg & SPRD_DMA_CHN_EN))
		return;

	sprd_dma_pause_resume(schan, true);
	sprd_dma_disable_chn(schan);
}

static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan)
{
	unsigned long addr, addr_high;

	addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) &
		    SPRD_DMA_HIGH_ADDR_MASK;

	return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}
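
/*
 * Addressing scheme recovered above: the low 32 address bits live in
 * SPRD_DMA_CHN_DES_ADDR and the high 4 bits of a 36-bit address space sit
 * in bits [31:28] of SPRD_DMA_CHN_WARP_TO, so shifting the masked value
 * left by SPRD_DMA_HIGH_ADDR_OFFSET moves them to address bits [35:32].
 */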

static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) &
		       SPRD_DMA_CHN_INT_STS;

	switch (intc_sts) {
	case SPRD_DMA_CFGERR_INT_STS:
		return SPRD_DMA_CFGERR_INT;
	case SPRD_DMA_LIST_INT_STS:
		return SPRD_DMA_LIST_INT;
	case SPRD_DMA_TRSC_INT_STS:
		return SPRD_DMA_TRANS_INT;
	case SPRD_DMA_BLK_INT_STS:
		return SPRD_DMA_BLK_INT;
	case SPRD_DMA_FRAG_INT_STS:
		return SPRD_DMA_FRAG_INT;
	default:
		dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n");
		return SPRD_DMA_NO_INT;
	}
}

static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan)
{
	u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN);

	return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK;
}

static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
				    struct sprd_dma_desc *sdesc)
{
	struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw;

	writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE);
	writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG);
	writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC);
	writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
	writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN);
	writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN);
	writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP);
	writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR);
	writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO);
	writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR);
	writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP);
	writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP);
	writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP);
	writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ);
}

static void sprd_dma_start(struct sprd_dma_chn *schan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&schan->vc);

	if (!vd)
		return;

	list_del(&vd->node);
	schan->cur_desc = to_sprd_dma_desc(vd);

	/*
	 * Copy the DMA configuration from DMA descriptor to this hardware
	 * channel.
	 */
	sprd_dma_set_chn_config(schan, schan->cur_desc);
	sprd_dma_set_uid(schan);
	sprd_dma_enable_chn(schan);

	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
		sprd_dma_soft_request(schan);
}

static void sprd_dma_stop(struct sprd_dma_chn *schan)
{
	sprd_dma_stop_and_disable(schan);
	sprd_dma_unset_uid(schan);
	sprd_dma_clear_int(schan);
}

static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc,
				      enum sprd_dma_int_type int_type,
				      enum sprd_dma_req_mode req_mode)
{
	if (int_type == SPRD_DMA_NO_INT)
		return false;

	return int_type >= req_mode + 1;
}
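
/*
 * Example of the ordering check above: for a block-mode request
 * (SPRD_DMA_BLK_REQ == 1) the descriptor is considered done once the
 * interrupt type is at least SPRD_DMA_BLK_INT (== 2), i.e. a block or any
 * coarser completion interrupt, while a mere fragment interrupt (== 1) is
 * ignored.
 */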

static irqreturn_t dma_irq_handle(int irq, void *dev_id)
{
	struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id;
	u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS);
	struct sprd_dma_chn *schan;
	struct sprd_dma_desc *sdesc;
	enum sprd_dma_req_mode req_type;
	enum sprd_dma_int_type int_type;
	bool trans_done = false;
	u32 i;

	/* Handle the pending channels one by one, lowest bit first. */
	while (irq_status) {
		i = __ffs(irq_status);
		irq_status &= (irq_status - 1);	/* clear the bit just taken */
		schan = &sdev->channels[i];

		spin_lock(&schan->vc.lock);
		int_type = sprd_dma_get_int_type(schan);
		req_type = sprd_dma_get_req_type(schan);
		sprd_dma_clear_int(schan);

		sdesc = schan->cur_desc;

		/* Check if the dma request descriptor is done. */
		trans_done = sprd_dma_check_trans_done(sdesc, int_type,
						       req_type);
		/*
		 * cur_desc may be NULL if the channel was just terminated;
		 * only complete a descriptor that is actually in flight.
		 */
		if (trans_done && sdesc) {
			vchan_cookie_complete(&sdesc->vd);
			schan->cur_desc = NULL;
			sprd_dma_start(schan);
		}
		spin_unlock(&schan->vc.lock);
	}

	return IRQ_HANDLED;
}

static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	int ret;

	ret = pm_runtime_get_sync(chan->device->dev);
	if (ret < 0)
		return ret;

	schan->dev_id = SPRD_DMA_SOFTWARE_UID;
	return 0;
}

static void sprd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_stop(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	vchan_free_chan_resources(&schan->vc);
	pm_runtime_put(chan->device->dev);
}

static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 pos;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&schan->vc.lock, flags);
	vd = vchan_find_desc(&schan->vc, cookie);
	if (vd) {
		struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
		struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;

		if (hw->trsc_len > 0)
			pos = hw->trsc_len;
		else if (hw->blk_len > 0)
			pos = hw->blk_len;
		else if (hw->frg_len > 0)
			pos = hw->frg_len;
		else
			pos = 0;
	} else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
		pos = sprd_dma_get_dst_addr(schan);
	} else {
		pos = 0;
	}
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	dma_set_residue(txstate, pos);
	return ret;
}

static void sprd_dma_issue_pending(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	if (vchan_issue_pending(&schan->vc) && !schan->cur_desc)
		sprd_dma_start(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);
}

static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
			   dma_addr_t dest, dma_addr_t src, size_t len)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
	struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
	u32 datawidth, src_step, des_step, fragment_len;
	u32 block_len, req_mode, irq_mode, transaction_len;
	u32 fix_mode = 0, fix_en = 0;

	/* Pick the widest data width the transfer length is aligned to. */
	if (IS_ALIGNED(len, 4)) {
		datawidth = 2;
		src_step = 4;
		des_step = 4;
	} else if (IS_ALIGNED(len, 2)) {
		datawidth = 1;
		src_step = 2;
		des_step = 2;
	} else {
		datawidth = 0;
		src_step = 1;
		des_step = 1;
	}

	fragment_len = SPRD_DMA_MEMCPY_MIN_SIZE;
	if (len <= SPRD_DMA_BLK_LEN_MASK) {
		block_len = len;
		transaction_len = 0;
		req_mode = SPRD_DMA_BLK_REQ;
		irq_mode = SPRD_DMA_BLK_INT;
	} else {
		block_len = SPRD_DMA_MEMCPY_MIN_SIZE;
		transaction_len = len;
		req_mode = SPRD_DMA_TRANS_REQ;
		irq_mode = SPRD_DMA_TRANS_INT;
	}

	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
	hw->wrap_ptr = (u32)((src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
			     SPRD_DMA_HIGH_ADDR_MASK);
	hw->wrap_to = (u32)((dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
			    SPRD_DMA_HIGH_ADDR_MASK);

	hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
	hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);

	if ((src_step != 0 && des_step != 0) || (src_step | des_step) == 0) {
		fix_en = 0;
	} else {
		fix_en = 1;
		if (src_step)
			fix_mode = 1;
		else
			fix_mode = 0;
	}
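
	/*
	 * Fixed-address note (derived from the logic above, not from a
	 * datasheet): fix_en is set when exactly one side has a zero step,
	 * i.e. one address stays fixed while the other advances, and
	 * fix_mode then records which side keeps stepping. For this
	 * memcpy-only driver both steps are always equal, so fix_en always
	 * ends up 0 here.
	 */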

	hw->frg_len = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
		      datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
		      req_mode << SPRD_DMA_REQ_MODE_OFFSET |
		      fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
		      fix_en << SPRD_DMA_FIX_EN_OFFSET |
		      (fragment_len & SPRD_DMA_FRG_LEN_MASK);
	hw->blk_len = block_len & SPRD_DMA_BLK_LEN_MASK;

	hw->intc = SPRD_DMA_CFG_ERR_INT_EN;

	switch (irq_mode) {
	case SPRD_DMA_NO_INT:
		break;
	case SPRD_DMA_FRAG_INT:
		hw->intc |= SPRD_DMA_FRAG_INT_EN;
		break;
	case SPRD_DMA_BLK_INT:
		hw->intc |= SPRD_DMA_BLK_INT_EN;
		break;
	case SPRD_DMA_BLK_FRAG_INT:
		hw->intc |= SPRD_DMA_BLK_INT_EN | SPRD_DMA_FRAG_INT_EN;
		break;
	case SPRD_DMA_TRANS_INT:
		hw->intc |= SPRD_DMA_TRANS_INT_EN;
		break;
	case SPRD_DMA_TRANS_FRAG_INT:
		hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_FRAG_INT_EN;
		break;
	case SPRD_DMA_TRANS_BLK_INT:
		hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_BLK_INT_EN;
		break;
	case SPRD_DMA_LIST_INT:
		hw->intc |= SPRD_DMA_LIST_INT_EN;
		break;
	case SPRD_DMA_CFGERR_INT:
		hw->intc |= SPRD_DMA_CFG_ERR_INT_EN;
		break;
	default:
		dev_err(sdev->dma_dev.dev, "invalid irq mode\n");
		return -EINVAL;
	}

	if (transaction_len == 0)
		hw->trsc_len = block_len & SPRD_DMA_TRSC_LEN_MASK;
	else
		hw->trsc_len = transaction_len & SPRD_DMA_TRSC_LEN_MASK;

	hw->trsf_step = (des_step & SPRD_DMA_TRSF_STEP_MASK) <<
			SPRD_DMA_DEST_TRSF_STEP_OFFSET |
			(src_step & SPRD_DMA_TRSF_STEP_MASK) <<
			SPRD_DMA_SRC_TRSF_STEP_OFFSET;

	hw->frg_step = 0;
	hw->src_blk_step = 0;
	hw->des_blk_step = 0;
	return 0;
}

static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_desc *sdesc;
	int ret;

	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
	if (!sdesc)
		return NULL;

	ret = sprd_dma_config(chan, sdesc, dest, src, len);
	if (ret) {
		kfree(sdesc);
		return NULL;
	}

	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}
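
/*
 * A minimal consumer-side sketch (an assumption about typical dmaengine
 * usage, not code from this driver): after mapping source and destination
 * buffers with the DMA API, a client would do roughly
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len, 0);
 *	if (tx) {
 *		cookie = dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *		dma_sync_wait(chan, cookie);
 *	}
 *
 * which lands in sprd_dma_prep_dma_memcpy(), sprd_dma_issue_pending() and
 * sprd_dma_tx_status() in this file.
 */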

static int sprd_dma_pause(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, true);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_resume(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, false);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_terminate_all(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_stop(schan);

	vchan_get_all_descriptors(&schan->vc, &head);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	vchan_dma_desc_free_list(&schan->vc, &head);
	return 0;
}

static void sprd_dma_free_desc(struct virt_dma_desc *vd)
{
	struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);

	kfree(sdesc);
}

static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 req = *(u32 *)param;

	if (req < sdev->total_chns)
		return req == schan->chn_num + 1;
	else
		return false;
}
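
/*
 * Channel request path (a summary of how the pieces fit together, with an
 * assumed DT snippet for illustration): a client node referencing this
 * controller, e.g.
 *
 *	dmas = <&apdma 2>;
 *	dma-names = "rx";
 *
 * resolves through of_dma_simple_xlate(), which invokes sprd_dma_filter_fn()
 * with the one-cell specifier (here 2), so request number N picks the
 * channel whose chn_num + 1 == N.
 */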

static int sprd_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct sprd_dma_dev *sdev;
	struct sprd_dma_chn *dma_chn;
	struct resource *res;
	u32 chn_count;
	int ret, i;

	ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count);
	if (ret) {
		dev_err(&pdev->dev, "get dma channels count failed\n");
		return ret;
	}

	sdev = devm_kzalloc(&pdev->dev,
			    struct_size(sdev, channels, chn_count),
			    GFP_KERNEL);
	if (!sdev)
		return -ENOMEM;

	sdev->clk = devm_clk_get(&pdev->dev, "enable");
	if (IS_ERR(sdev->clk)) {
		dev_err(&pdev->dev, "get enable clock failed\n");
		return PTR_ERR(sdev->clk);
	}

	/* The ashb clock is optional and only used by the AGCP DMA. */
	sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb");
	if (IS_ERR(sdev->ashb_clk))
		dev_warn(&pdev->dev, "no optional ashb eb clock\n");

	/*
	 * We have three DMA controllers: AP DMA, AON DMA and AGCP DMA. The
	 * AGCP DMA controller may run without an interrupt, which saves
	 * power by not waking the system on DMA interrupts, so the DMA
	 * interrupts property should be optional.
	 */
	sdev->irq = platform_get_irq(pdev, 0);
	if (sdev->irq > 0) {
		ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle,
				       0, "sprd_dma", sdev);
		if (ret < 0) {
			dev_err(&pdev->dev, "request dma irq failed\n");
			return ret;
		}
	} else {
		dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdev->glb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(sdev->glb_base))
		return PTR_ERR(sdev->glb_base);

	dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
	sdev->total_chns = chn_count;
	sdev->dma_dev.chancnt = chn_count;
	INIT_LIST_HEAD(&sdev->dma_dev.channels);
	INIT_LIST_HEAD(&sdev->dma_dev.global_node);
	sdev->dma_dev.dev = &pdev->dev;
	sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources;
	sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources;
	sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
	sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
	sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
	sdev->dma_dev.device_pause = sprd_dma_pause;
	sdev->dma_dev.device_resume = sprd_dma_resume;
	sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;

	for (i = 0; i < chn_count; i++) {
		dma_chn = &sdev->channels[i];
		dma_chn->chn_num = i;
		dma_chn->cur_desc = NULL;
		/* get each channel's registers base address. */
		dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET +
				    SPRD_DMA_CHN_REG_LENGTH * i;

		dma_chn->vc.desc_free = sprd_dma_free_desc;
		vchan_init(&dma_chn->vc, &sdev->dma_dev);
	}

	platform_set_drvdata(pdev, sdev);
	ret = sprd_dma_enable(sdev);
	if (ret)
		return ret;

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto err_rpm;

	ret = dma_async_device_register(&sdev->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "register dma device failed:%d\n", ret);
		goto err_register;
	}

	sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask;
	ret = of_dma_controller_register(np, of_dma_simple_xlate,
					 &sprd_dma_info);
	if (ret)
		goto err_of_register;

	pm_runtime_put(&pdev->dev);
	return 0;

err_of_register:
	dma_async_device_unregister(&sdev->dma_dev);
err_register:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_rpm:
	sprd_dma_disable(sdev);
	return ret;
}

static int sprd_dma_remove(struct platform_device *pdev)
{
	struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
	struct sprd_dma_chn *c, *cn;
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		return ret;

	/* explicitly free the irq */
	if (sdev->irq > 0)
		devm_free_irq(&pdev->dev, sdev->irq, sdev);

	list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdev->dma_dev);
	sprd_dma_disable(sdev);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static const struct of_device_id sprd_dma_match[] = {
	{ .compatible = "sprd,sc9860-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, sprd_dma_match);

static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);

	sprd_dma_disable(sdev);
	return 0;
}

static int __maybe_unused sprd_dma_runtime_resume(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
	int ret;

	ret = sprd_dma_enable(sdev);
	if (ret)
		dev_err(sdev->dma_dev.dev, "enable dma failed\n");

	return ret;
}

static const struct dev_pm_ops sprd_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend,
			   sprd_dma_runtime_resume,
			   NULL)
};

static struct platform_driver sprd_dma_driver = {
	.probe = sprd_dma_probe,
	.remove = sprd_dma_remove,
	.driver = {
		.name = "sprd-dma",
		.of_match_table = sprd_dma_match,
		.pm = &sprd_dma_pm_ops,
	},
};
module_platform_driver(sprd_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DMA driver for Spreadtrum");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
MODULE_ALIAS("platform:sprd-dma");