dma.c

/*
 * Renesas R-Car Audio DMAC support
 *
 * Copyright (C) 2015 Renesas Electronics Corp.
 * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/of_dma.h>
#include "rsnd.h"

/*
 * Audio DMAC peri peri register
 */
#define PDMASAR         0x00
#define PDMADAR         0x04
#define PDMACHCR        0x0c

/* PDMACHCR */
#define PDMACHCR_DE     (1 << 0)
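
/*
 * Note: each stream transfer is handled either by a DMAEngine channel
 * (struct rsnd_dmaen) or by an Audio DMAC peri peri channel
 * (struct rsnd_dmapp); struct rsnd_dma below wraps one of the two
 * in a union.
 */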
struct rsnd_dmaen {
        struct dma_chan         *chan;
        dma_addr_t              dma_buf;
        unsigned int            dma_len;
        unsigned int            dma_period;
        unsigned int            dma_cnt;
};

struct rsnd_dmapp {
        int                     dmapp_id;
        u32                     chcr;
};

struct rsnd_dma {
        struct rsnd_mod         mod;
        struct rsnd_mod         *mod_from;
        struct rsnd_mod         *mod_to;
        dma_addr_t              src_addr;
        dma_addr_t              dst_addr;
        union {
                struct rsnd_dmaen en;
                struct rsnd_dmapp pp;
        } dma;
};

struct rsnd_dma_ctrl {
        void __iomem    *base;
        int             dmaen_num;
        int             dmapp_num;
};

#define rsnd_priv_to_dmac(p)    ((struct rsnd_dma_ctrl *)(p)->dma)
#define rsnd_mod_to_dma(_mod)   container_of((_mod), struct rsnd_dma, mod)
#define rsnd_dma_to_dmaen(dma)  (&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma)  (&(dma)->dma.pp)

/*
 * Audio DMAC
 */
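/*
 * The Audio DMAC path uses DMAEngine cyclic transfers. The sound buffer
 * is mapped with dma_map_single(), so each period has to be synchronized
 * by hand: see __rsnd_dmaen_sync() and __rsnd_dmaen_complete() below.
 */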
#define rsnd_dmaen_sync(dmaen, io, i)   __rsnd_dmaen_sync(dmaen, io, i, 1)
#define rsnd_dmaen_unsync(dmaen, io, i) __rsnd_dmaen_sync(dmaen, io, i, 0)
static void __rsnd_dmaen_sync(struct rsnd_dmaen *dmaen, struct rsnd_dai_stream *io,
                              int i, int sync)
{
        struct device *dev = dmaen->chan->device->dev;
        enum dma_data_direction dir;
        int is_play = rsnd_io_is_play(io);
        dma_addr_t buf;
        int len, max;
        size_t period;

        len     = dmaen->dma_len;
        period  = dmaen->dma_period;
        max     = len / period;
        i       = i % max;
        buf     = dmaen->dma_buf + (period * i);

        dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

        if (sync)
                dma_sync_single_for_device(dev, buf, period, dir);
        else
                dma_sync_single_for_cpu(dev, buf, period, dir);
}

static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
                                  struct rsnd_dai_stream *io)
{
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
        bool elapsed = false;
        unsigned long flags;

        /*
         * Renesas sound Gen1 needs 1 DMAC,
         * Gen2 needs 2 DMACs.
         * In the Gen2 case these are the Audio-DMAC and the
         * Audio-DMAC-peri-peri. But the Audio-DMAC-peri-peri has no
         * interrupt, and this driver relies on that here.
         *
         * If the Audio-DMAC-peri-peri had an interrupt,
         * rsnd_dai_pointer_update() would be called twice,
         * and it would break io->byte_pos.
         */
        spin_lock_irqsave(&priv->lock, flags);

        if (rsnd_io_is_working(io)) {
                rsnd_dmaen_unsync(dmaen, io, dmaen->dma_cnt);

                /*
                 * The next period is already started.
                 * Let's sync the next-next period.
                 * see
                 *      rsnd_dmaen_start()
                 */
                rsnd_dmaen_sync(dmaen, io, dmaen->dma_cnt + 2);

                elapsed = rsnd_dai_pointer_update(io, io->byte_per_period);

                dmaen->dma_cnt++;
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (elapsed)
                rsnd_dai_period_elapsed(io);
}

static void rsnd_dmaen_complete(void *data)
{
        struct rsnd_mod *mod = data;

        rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
}

static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
                                                   struct rsnd_mod *mod_from,
                                                   struct rsnd_mod *mod_to)
{
        if ((!mod_from && !mod_to) ||
            (mod_from && mod_to))
                return NULL;

        if (mod_from)
                return rsnd_mod_dma_req(io, mod_from);
        else
                return rsnd_mod_dma_req(io, mod_to);
}

static int rsnd_dmaen_stop(struct rsnd_mod *mod,
                           struct rsnd_dai_stream *io,
                           struct rsnd_priv *priv)
{
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

        if (dmaen->chan) {
                int is_play = rsnd_io_is_play(io);

                dmaengine_terminate_all(dmaen->chan);
                dma_unmap_single(dmaen->chan->device->dev,
                                 dmaen->dma_buf, dmaen->dma_len,
                                 is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        }

        return 0;
}

static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
                                  struct rsnd_dai_stream *io,
                                  struct rsnd_priv *priv)
{
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

        /*
         * DMAEngine release uses a mutex lock.
         * Thus, it shouldn't be called under a spinlock.
         * Let's call it from nolock_stop.
         */
        if (dmaen->chan)
                dma_release_channel(dmaen->chan);

        dmaen->chan = NULL;

        return 0;
}

static int rsnd_dmaen_nolock_start(struct rsnd_mod *mod,
                                   struct rsnd_dai_stream *io,
                                   struct rsnd_priv *priv)
{
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
        struct device *dev = rsnd_priv_to_dev(priv);

        if (dmaen->chan) {
                dev_err(dev, "it already has dma channel\n");
                return -EIO;
        }

        /*
         * DMAEngine request uses a mutex lock.
         * Thus, it shouldn't be called under a spinlock.
         * Let's call it from nolock_start.
         */
        dmaen->chan = rsnd_dmaen_request_channel(io,
                                                 dma->mod_from,
                                                 dma->mod_to);
        if (IS_ERR_OR_NULL(dmaen->chan)) {
                /* a NULL channel is also a failure; don't turn it into 0 */
                int ret = dmaen->chan ? PTR_ERR(dmaen->chan) : -EIO;

                dmaen->chan = NULL;
                dev_err(dev, "can't get dma channel\n");
                return ret;
        }

        return 0;
}
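
/*
 * rsnd_dmaen_start() programs the DMAEngine channel: slave config,
 * dma_map_single() of the PCM buffer, cyclic descriptor preparation,
 * manual sync of the first two periods, then submit + issue pending.
 */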
static int rsnd_dmaen_start(struct rsnd_mod *mod,
                            struct rsnd_dai_stream *io,
                            struct rsnd_priv *priv)
{
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
        struct snd_pcm_substream *substream = io->substream;
        struct device *dev = rsnd_priv_to_dev(priv);
        struct dma_async_tx_descriptor *desc;
        struct dma_slave_config cfg = {};
        dma_addr_t buf;
        size_t len;
        size_t period;
        int is_play = rsnd_io_is_play(io);
        int i;
        int ret;

        cfg.direction   = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
        cfg.src_addr    = dma->src_addr;
        cfg.dst_addr    = dma->dst_addr;
        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

        dev_dbg(dev, "%s[%d] %pad -> %pad\n",
                rsnd_mod_name(mod), rsnd_mod_id(mod),
                &cfg.src_addr, &cfg.dst_addr);

        ret = dmaengine_slave_config(dmaen->chan, &cfg);
        if (ret < 0)
                return ret;

        len     = snd_pcm_lib_buffer_bytes(substream);
        period  = snd_pcm_lib_period_bytes(substream);
        buf     = dma_map_single(dmaen->chan->device->dev,
                                 substream->runtime->dma_area,
                                 len,
                                 is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        if (dma_mapping_error(dmaen->chan->device->dev, buf)) {
                dev_err(dev, "dma map failed\n");
                return -EIO;
        }

        desc = dmaengine_prep_dma_cyclic(dmaen->chan,
                                         buf, len, period,
                                         is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(dev, "dmaengine_prep_dma_cyclic() fail\n");
                return -EIO;
        }

        desc->callback          = rsnd_dmaen_complete;
        desc->callback_param    = rsnd_mod_get(dma);

        dmaen->dma_buf          = buf;
        dmaen->dma_len          = len;
        dmaen->dma_period       = period;
        dmaen->dma_cnt          = 0;

        /*
         * synchronize this and the next period
         * see
         *      __rsnd_dmaen_complete()
         */
        for (i = 0; i < 2; i++)
                rsnd_dmaen_sync(dmaen, io, i);

        if (dmaengine_submit(desc) < 0) {
                dev_err(dev, "dmaengine_submit() fail\n");
                return -EIO;
        }

        dma_async_issue_pending(dmaen->chan);

        return 0;
}

struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
                                          struct rsnd_mod *mod, char *name)
{
        struct dma_chan *chan = NULL;
        struct device_node *np;
        int i = 0;

        for_each_child_of_node(of_node, np) {
                if (i == rsnd_mod_id(mod) && (!chan))
                        chan = of_dma_request_slave_channel(np, name);
                i++;
        }

        /* It should call of_node_put(), since of_node came from rsnd_xxx_of_node() */
        of_node_put(of_node);

        return chan;
}

static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
                             struct rsnd_dma *dma,
                             struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
        struct rsnd_priv *priv = rsnd_io_to_priv(io);
        struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
        struct dma_chan *chan;

        /* try to get a DMAEngine channel */
        chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
        if (IS_ERR_OR_NULL(chan)) {
                /*
                 * DMA request failed. Fall back to PIO mode.
                 * see
                 *      rsnd_ssi_fallback()
                 *      rsnd_rdai_continuance_probe()
                 */
                return -EAGAIN;
        }
        dma_release_channel(chan);

        dmac->dmaen_num++;

        return 0;
}

static struct rsnd_mod_ops rsnd_dmaen_ops = {
        .name           = "audmac",
        .nolock_start   = rsnd_dmaen_nolock_start,
        .nolock_stop    = rsnd_dmaen_nolock_stop,
        .start          = rsnd_dmaen_start,
        .stop           = rsnd_dmaen_stop,
};

/*
 * Audio DMAC peri peri
 */
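/*
 * The peri peri DMAC has no DMAEngine driver; it is programmed directly
 * through its PDMASAR/PDMADAR/PDMACHCR registers and raises no completion
 * interrupt. The tables below map SSIU/SRC/CMD module IDs to the IDs that
 * are packed into PDMACHCR (see rsnd_dmapp_get_chcr()).
 */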
static const u8 gen2_id_table_ssiu[] = {
        0x00, /* SSI00 */
        0x04, /* SSI10 */
        0x08, /* SSI20 */
        0x0c, /* SSI3  */
        0x0d, /* SSI4  */
        0x0e, /* SSI5  */
        0x0f, /* SSI6  */
        0x10, /* SSI7  */
        0x11, /* SSI8  */
        0x12, /* SSI90 */
};
static const u8 gen2_id_table_scu[] = {
        0x2d, /* SCU_SRCI0 */
        0x2e, /* SCU_SRCI1 */
        0x2f, /* SCU_SRCI2 */
        0x30, /* SCU_SRCI3 */
        0x31, /* SCU_SRCI4 */
        0x32, /* SCU_SRCI5 */
        0x33, /* SCU_SRCI6 */
        0x34, /* SCU_SRCI7 */
        0x35, /* SCU_SRCI8 */
        0x36, /* SCU_SRCI9 */
};
static const u8 gen2_id_table_cmd[] = {
        0x37, /* SCU_CMD0 */
        0x38, /* SCU_CMD1 */
};

static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
                             struct rsnd_mod *mod)
{
        struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
        struct rsnd_mod *src = rsnd_io_to_mod_src(io);
        struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
        const u8 *entry = NULL;
        int id = rsnd_mod_id(mod);
        int size = 0;

        if (mod == ssi) {
                entry = gen2_id_table_ssiu;
                size = ARRAY_SIZE(gen2_id_table_ssiu);
        } else if (mod == src) {
                entry = gen2_id_table_scu;
                size = ARRAY_SIZE(gen2_id_table_scu);
        } else if (mod == dvc) {
                entry = gen2_id_table_cmd;
                size = ARRAY_SIZE(gen2_id_table_cmd);
        }

        if ((!entry) || (size <= id)) {
                struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));

                dev_err(dev, "unknown connection (%s[%d])\n",
                        rsnd_mod_name(mod), rsnd_mod_id(mod));

                /* use non-prohibited SRS number as error */
                return 0x00; /* SSI00 */
        }

        return entry[id];
}

static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
                               struct rsnd_mod *mod_from,
                               struct rsnd_mod *mod_to)
{
        return  (rsnd_dmapp_get_id(io, mod_from) << 24) +
                (rsnd_dmapp_get_id(io, mod_to) << 16);
}
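
/*
 * Each peri peri channel owns a 0x10-byte register window starting at
 * offset 0x20 from the "audmapp" base, so channel N's PDMASAR/PDMADAR/
 * PDMACHCR live at base + 0x20 + (N * 0x10) + reg.
 */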
#define rsnd_dmapp_addr(dmac, dma, reg)                                 \
        (dmac->base + 0x20 + reg +                                      \
         (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))

static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
{
        struct rsnd_mod *mod = rsnd_mod_get(dma);
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
        struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
        struct device *dev = rsnd_priv_to_dev(priv);

        dev_dbg(dev, "w %p : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);

        iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}

static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
{
        struct rsnd_mod *mod = rsnd_mod_get(dma);
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
        struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);

        return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
}

static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
{
        struct rsnd_mod *mod = rsnd_mod_get(dma);
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
        struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
        void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
        u32 val = ioread32(addr);

        val &= ~mask;
        val |= (data & mask);

        iowrite32(val, addr);
}
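
/*
 * Stopping a peri peri channel: clear PDMACHCR_DE and poll (for at most
 * ~1ms, 1024 x udelay(1)) until the hardware acknowledges by clearing
 * the DE bit; -EIO is returned if it never does.
 */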
static int rsnd_dmapp_stop(struct rsnd_mod *mod,
                           struct rsnd_dai_stream *io,
                           struct rsnd_priv *priv)
{
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        int i;

        rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);

        for (i = 0; i < 1024; i++) {
                if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
                        return 0;
                udelay(1);
        }

        return -EIO;
}

static int rsnd_dmapp_start(struct rsnd_mod *mod,
                            struct rsnd_dai_stream *io,
                            struct rsnd_priv *priv)
{
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

        rsnd_dmapp_write(dma, dma->src_addr, PDMASAR);
        rsnd_dmapp_write(dma, dma->dst_addr, PDMADAR);
        rsnd_dmapp_write(dma, dmapp->chcr,   PDMACHCR);

        return 0;
}

static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
                             struct rsnd_dma *dma,
                             struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
        struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
        struct rsnd_priv *priv = rsnd_io_to_priv(io);
        struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
        struct device *dev = rsnd_priv_to_dev(priv);

        dmapp->dmapp_id = dmac->dmapp_num;
        dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;

        dmac->dmapp_num++;

        dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
                dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);

        return 0;
}

static struct rsnd_mod_ops rsnd_dmapp_ops = {
        .name   = "audmac-pp",
        .start  = rsnd_dmapp_start,
        .stop   = rsnd_dmapp_stop,
        .quit   = rsnd_dmapp_stop,
};

/*
 * Common DMAC Interface
 */

/*
 *      DMA read/write register offset
 *
 *      RSND_xxx_I_N    for Audio DMAC input
 *      RSND_xxx_O_N    for Audio DMAC output
 *      RSND_xxx_I_P    for Audio DMAC peri peri input
 *      RSND_xxx_O_P    for Audio DMAC peri peri output
 *
 *      ex) R-Car H2 case
 *            mod        / DMAC in    / DMAC out   / DMAC PP in / DMAC pp out
 *      SSI : 0xec541000 / 0xec241008 / 0xec24100c
 *      SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
 *      SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
 *      CMD : 0xec500000 /            / 0xec008000               0xec308000
 */
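
/*
 * For example, with the H2 addresses above (ssi_reg = 0xec541000):
 * RDMA_SSI_I_N(ssi, 0) = 0xec541000 - 0x00300000 + (0x40 * 0) + 0x8
 *                      = 0xec241008, which matches the "DMAC in" column
 * for SSI in the table.
 */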
#define RDMA_SSI_I_N(addr, i)   (addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i)   (addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)

#define RDMA_SSIU_I_N(addr, i)  (addr ##_reg - 0x00441000 + (0x1000 * i))
#define RDMA_SSIU_O_N(addr, i)  (addr ##_reg - 0x00441000 + (0x1000 * i))

#define RDMA_SSIU_I_P(addr, i)  (addr ##_reg - 0x00141000 + (0x1000 * i))
#define RDMA_SSIU_O_P(addr, i)  (addr ##_reg - 0x00141000 + (0x1000 * i))

#define RDMA_SRC_I_N(addr, i)   (addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i)   (addr ##_reg - 0x004fc000 + (0x400 * i))

#define RDMA_SRC_I_P(addr, i)   (addr ##_reg - 0x00200000 + (0x400 * i))
#define RDMA_SRC_O_P(addr, i)   (addr ##_reg - 0x001fc000 + (0x400 * i))

#define RDMA_CMD_O_N(addr, i)   (addr ##_reg - 0x004f8000 + (0x400 * i))
#define RDMA_CMD_O_P(addr, i)   (addr ##_reg - 0x001f8000 + (0x400 * i))
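
/*
 * rsnd_gen2_dma_addr() picks an address out of dma_addrs[][][]:
 * the first index is 0 = SRC, 1 = SSI, 2 = SSIU (SSI via BUSIF),
 * the second is capture/playback, and the third is how much of the
 * SRC/CMD path is in use (use_src + use_cmd).
 */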
static dma_addr_t
rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
                   struct rsnd_mod *mod,
                   int is_play, int is_from)
{
        struct rsnd_priv *priv = rsnd_io_to_priv(io);
        struct device *dev = rsnd_priv_to_dev(priv);
        phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
        phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
        int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
        int use_src = !!rsnd_io_to_mod_src(io);
        int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
                      !!rsnd_io_to_mod_mix(io) ||
                      !!rsnd_io_to_mod_ctu(io);
        int id = rsnd_mod_id(mod);
        struct dma_addr {
                dma_addr_t out_addr;
                dma_addr_t in_addr;
        } dma_addrs[3][2][3] = {
                /* SRC */
                /* Capture */
                {{{ 0,                          0 },
                  { RDMA_SRC_O_N(src, id),      RDMA_SRC_I_P(src, id) },
                  { RDMA_CMD_O_N(src, id),      RDMA_SRC_I_P(src, id) } },
                 /* Playback */
                 {{ 0,                          0, },
                  { RDMA_SRC_O_P(src, id),      RDMA_SRC_I_N(src, id) },
                  { RDMA_CMD_O_P(src, id),      RDMA_SRC_I_N(src, id) } }
                },
                /* SSI */
                /* Capture */
                {{{ RDMA_SSI_O_N(ssi, id),      0 },
                  { RDMA_SSIU_O_P(ssi, id),     0 },
                  { RDMA_SSIU_O_P(ssi, id),     0 } },
                 /* Playback */
                 {{ 0,                          RDMA_SSI_I_N(ssi, id) },
                  { 0,                          RDMA_SSIU_I_P(ssi, id) },
                  { 0,                          RDMA_SSIU_I_P(ssi, id) } }
                },
                /* SSIU */
                /* Capture */
                {{{ RDMA_SSIU_O_N(ssi, id),     0 },
                  { RDMA_SSIU_O_P(ssi, id),     0 },
                  { RDMA_SSIU_O_P(ssi, id),     0 } },
                 /* Playback */
                 {{ 0,                          RDMA_SSIU_I_N(ssi, id) },
                  { 0,                          RDMA_SSIU_I_P(ssi, id) },
                  { 0,                          RDMA_SSIU_I_P(ssi, id) } } },
        };

        /* it shouldn't happen */
        if (use_cmd && !use_src)
                dev_err(dev, "DVC is selected without SRC\n");

        /* use SSIU or SSI ? */
        if (is_ssi && rsnd_ssi_use_busif(io))
                is_ssi++;

        return (is_from) ?
                dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
                dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
}

static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
                                struct rsnd_mod *mod,
                                int is_play, int is_from)
{
        struct rsnd_priv *priv = rsnd_io_to_priv(io);

        /*
         * gen1 uses default DMA addr
         */
        if (rsnd_is_gen1(priv))
                return 0;

        if (!mod)
                return 0;

        return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
}

#define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
static void rsnd_dma_of_path(struct rsnd_mod *this,
                             struct rsnd_dai_stream *io,
                             int is_play,
                             struct rsnd_mod **mod_from,
                             struct rsnd_mod **mod_to)
{
        struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
        struct rsnd_mod *src = rsnd_io_to_mod_src(io);
        struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
        struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
        struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
        struct rsnd_mod *mod[MOD_MAX];
        struct rsnd_mod *mod_start, *mod_end;
        struct rsnd_priv *priv = rsnd_mod_to_priv(this);
        struct device *dev = rsnd_priv_to_dev(priv);
        int nr, i, idx;

        if (!ssi)
                return;

        nr = 0;
        for (i = 0; i < MOD_MAX; i++) {
                mod[i] = NULL;
                nr += !!rsnd_io_to_mod(io, i);
        }

        /*
         * [S] -*-> [E]
         * [S] -*-> SRC -o-> [E]
         * [S] -*-> SRC -> DVC -o-> [E]
         * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
         *
         * playback     [S] = mem
         *              [E] = SSI
         *
         * capture      [S] = SSI
         *              [E] = mem
         *
         * -*-> Audio DMAC
         * -o-> Audio DMAC peri peri
         */
        mod_start       = (is_play) ? NULL : ssi;
        mod_end         = (is_play) ? ssi  : NULL;

        idx = 0;
        mod[idx++] = mod_start;
        for (i = 1; i < nr; i++) {
                if (src) {
                        mod[idx++] = src;
                        src = NULL;
                } else if (ctu) {
                        mod[idx++] = ctu;
                        ctu = NULL;
                } else if (mix) {
                        mod[idx++] = mix;
                        mix = NULL;
                } else if (dvc) {
                        mod[idx++] = dvc;
                        dvc = NULL;
                }
        }
        mod[idx] = mod_end;

        /*
         *              | SSI | SRC |
         * -------------+-----+-----+
         *  is_play     |  o  |  *  |
         * !is_play     |  *  |  o  |
         */
        if ((this == ssi) == (is_play)) {
                *mod_from       = mod[idx - 1];
                *mod_to         = mod[idx];
        } else {
                *mod_from       = mod[0];
                *mod_to         = mod[1];
        }

        dev_dbg(dev, "module connection (this is %s[%d])\n",
                rsnd_mod_name(this), rsnd_mod_id(this));
        for (i = 0; i <= idx; i++) {
                dev_dbg(dev, " %s[%d]%s\n",
                        rsnd_mod_name(mod[i]), rsnd_mod_id(mod[i]),
                        (mod[i] == *mod_from) ? " from" :
                        (mod[i] == *mod_to)   ? " to" : "");
        }
}
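
/*
 * rsnd_dma_attach() picks the DMAC type from the path: if both ends of
 * the transfer are on-chip modules (mod_from and mod_to are set) it uses
 * the Audio DMAC peri peri ops, otherwise (memory on one end, or Gen1)
 * the DMAEngine based Audio DMAC ops.
 */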
int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
                    struct rsnd_mod **dma_mod)
{
        struct rsnd_mod *mod_from = NULL;
        struct rsnd_mod *mod_to = NULL;
        struct rsnd_priv *priv = rsnd_io_to_priv(io);
        struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
        struct device *dev = rsnd_priv_to_dev(priv);
        struct rsnd_mod_ops *ops;
        enum rsnd_mod_type type;
        int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
                      struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
        int is_play = rsnd_io_is_play(io);
        int ret, dma_id;

        /*
         * No DMAC is available. Fall back to PIO mode.
         * see
         *      rsnd_ssi_fallback()
         *      rsnd_rdai_continuance_probe()
         */
        if (!dmac)
                return -EAGAIN;

        rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);

        /* for Gen2 */
        if (mod_from && mod_to) {
                ops     = &rsnd_dmapp_ops;
                attach  = rsnd_dmapp_attach;
                dma_id  = dmac->dmapp_num;
                type    = RSND_MOD_AUDMAPP;
        } else {
                ops     = &rsnd_dmaen_ops;
                attach  = rsnd_dmaen_attach;
                dma_id  = dmac->dmaen_num;
                type    = RSND_MOD_AUDMA;
        }

        /* for Gen1, overwrite */
        if (rsnd_is_gen1(priv)) {
                ops     = &rsnd_dmaen_ops;
                attach  = rsnd_dmaen_attach;
                dma_id  = dmac->dmaen_num;
                type    = RSND_MOD_AUDMA;
        }

        if (!(*dma_mod)) {
                struct rsnd_dma *dma;

                dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
                if (!dma)
                        return -ENOMEM;

                *dma_mod = rsnd_mod_get(dma);

                ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
                                    rsnd_mod_get_status, type, dma_id);
                if (ret < 0)
                        return ret;

                dev_dbg(dev, "%s[%d] %s[%d] -> %s[%d]\n",
                        rsnd_mod_name(*dma_mod), rsnd_mod_id(*dma_mod),
                        rsnd_mod_name(mod_from), rsnd_mod_id(mod_from),
                        rsnd_mod_name(mod_to),   rsnd_mod_id(mod_to));

                ret = attach(io, dma, mod_from, mod_to);
                if (ret < 0)
                        return ret;

                dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
                dma->dst_addr = rsnd_dma_addr(io, mod_to,   is_play, 0);
                dma->mod_from = mod_from;
                dma->mod_to   = mod_to;
        }

        ret = rsnd_dai_connect(*dma_mod, io, type);
        if (ret < 0)
                return ret;

        return 0;
}
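
/*
 * rsnd_dma_probe() only maps the Gen2 "audmapp" register window; when the
 * resource or allocation is missing it returns 0 so the driver can still
 * come up in PIO mode, and DMAEngine channels are requested per stream
 * later (see rsnd_dmaen_nolock_start()).
 */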
int rsnd_dma_probe(struct rsnd_priv *priv)
{
        struct platform_device *pdev = rsnd_priv_to_pdev(priv);
        struct device *dev = rsnd_priv_to_dev(priv);
        struct rsnd_dma_ctrl *dmac;
        struct resource *res;

        /*
         * for Gen1
         */
        if (rsnd_is_gen1(priv))
                return 0;

        /*
         * for Gen2
         */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
        dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
        if (!dmac || !res) {
                dev_err(dev, "dma allocate failed\n");
                return 0; /* it will be PIO mode */
        }

        dmac->dmapp_num = 0;
        dmac->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(dmac->base))
                return PTR_ERR(dmac->base);

        priv->dma = dmac;

        return 0;
}