#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include "dmaengine.h"
#define DESC_TYPE	27
#define DESC_TYPE_HOST	0x10
#define DESC_TYPE_TEARD	0x13

#define TD_DESC_IS_RX	(1 << 16)
#define TD_DESC_DMA_NUM	10

#define DESC_LENGTH_BITS_NUM	21

#define DESC_TYPE_USB		(5 << 26)
#define DESC_PD_COMPLETE	(1 << 31)

/* DMA engine */
#define DMA_TDFDQ	4
#define DMA_TXGCR(x)	(0x800 + (x) * 0x20)
#define DMA_RXGCR(x)	(0x808 + (x) * 0x20)
#define RXHPCRA0	4

#define GCR_CHAN_ENABLE		(1 << 31)
#define GCR_TEARDOWN		(1 << 30)
#define GCR_STARV_RETRY		(1 << 24)
#define GCR_DESC_TYPE_HOST	(1 << 14)

/* DMA scheduler */
#define DMA_SCHED_CTRL		0
#define DMA_SCHED_CTRL_EN	(1 << 31)
#define DMA_SCHED_WORD(x)	((x) * 4 + 0x800)

#define SCHED_ENTRY0_CHAN(x)	((x) << 0)
#define SCHED_ENTRY0_IS_RX	(1 << 7)

#define SCHED_ENTRY1_CHAN(x)	((x) << 8)
#define SCHED_ENTRY1_IS_RX	(1 << 15)

#define SCHED_ENTRY2_CHAN(x)	((x) << 16)
#define SCHED_ENTRY2_IS_RX	(1 << 23)

#define SCHED_ENTRY3_CHAN(x)	((x) << 24)
#define SCHED_ENTRY3_IS_RX	(1 << 31)
/* Queue manager */
/* 4 KiB of memory for descriptors, 2 for each endpoint */
#define ALLOC_DESCS_NUM		128
#define DESCS_AREAS		1
#define TOTAL_DESCS_NUM		(ALLOC_DESCS_NUM * DESCS_AREAS)
#define QMGR_SCRATCH_SIZE	(TOTAL_DESCS_NUM * 4)

#define QMGR_LRAM0_BASE		0x80
#define QMGR_LRAM_SIZE		0x84
#define QMGR_LRAM1_BASE		0x88
#define QMGR_MEMBASE(x)		(0x1000 + (x) * 0x10)
#define QMGR_MEMCTRL(x)		(0x1004 + (x) * 0x10)
#define QMGR_MEMCTRL_IDX_SH	16
#define QMGR_MEMCTRL_DESC_SH	8

#define QMGR_NUM_PEND	5
#define QMGR_PEND(x)	(0x90 + (x) * 4)

#define QMGR_PENDING_SLOT_Q(x)	((x) / 32)
#define QMGR_PENDING_BIT_Q(x)	((x) % 32)

#define QMGR_QUEUE_A(n)	(0x2000 + (n) * 0x10)
#define QMGR_QUEUE_B(n)	(0x2004 + (n) * 0x10)
#define QMGR_QUEUE_C(n)	(0x2008 + (n) * 0x10)
#define QMGR_QUEUE_D(n)	(0x200c + (n) * 0x10)
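
/*
 * Each queue exposes four 32-bit registers (A-D); only register D appears
 * to be needed here: writing it pushes a descriptor onto the queue and
 * reading it pops one, with the low bits carrying a descriptor size hint
 * rather than address bits.
 */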

/* Glue layer specific */
/* USBSS / USB AM335x */
#define USBSS_IRQ_STATUS	0x28
#define USBSS_IRQ_ENABLER	0x2c
#define USBSS_IRQ_CLEARR	0x30

#define USBSS_IRQ_PD_COMP	(1 << 2)

/* Packet Descriptor */
#define PD2_ZERO_LENGTH		(1 << 19)

struct cppi41_channel {
	struct dma_chan chan;
	struct dma_async_tx_descriptor txd;
	struct cppi41_dd *cdd;
	struct cppi41_desc *desc;
	dma_addr_t desc_phys;
	void __iomem *gcr_reg;
	int is_tx;
	u32 residue;

	unsigned int q_num;
	unsigned int q_comp_num;
	unsigned int port_num;

	unsigned td_retry;
	unsigned td_queued:1;
	unsigned td_seen:1;
	unsigned td_desc_seen:1;

	struct list_head node;		/* Node for pending list */
};
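
/* One hardware host packet descriptor: eight 32-bit words, 32-byte aligned */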
struct cppi41_desc {
	u32 pd0;
	u32 pd1;
	u32 pd2;
	u32 pd3;
	u32 pd4;
	u32 pd5;
	u32 pd6;
	u32 pd7;
} __aligned(32);

struct chan_queues {
	u16 submit;
	u16 complete;
};

struct cppi41_dd {
	struct dma_device ddev;

	void *qmgr_scratch;
	dma_addr_t scratch_phys;

	struct cppi41_desc *cd;
	dma_addr_t descs_phys;
	u32 first_td_desc;
	struct cppi41_channel *chan_busy[ALLOC_DESCS_NUM];

	void __iomem *usbss_mem;
	void __iomem *ctrl_mem;
	void __iomem *sched_mem;
	void __iomem *qmgr_mem;
	unsigned int irq;
	const struct chan_queues *queues_rx;
	const struct chan_queues *queues_tx;
	struct chan_queues td_queue;

	struct list_head pending;	/* Pending queued transfers */
	spinlock_t lock;		/* Lock for pending list */

	/* context for suspend/resume */
	unsigned int dma_tdfdq;
};

#define FIRST_COMPLETION_QUEUE	93

static const struct chan_queues usb_queues_tx[] = {
	/* USB0 ENDP 1 */
	[ 0] = { .submit = 32, .complete =  93},
	[ 1] = { .submit = 34, .complete =  94},
	[ 2] = { .submit = 36, .complete =  95},
	[ 3] = { .submit = 38, .complete =  96},
	[ 4] = { .submit = 40, .complete =  97},
	[ 5] = { .submit = 42, .complete =  98},
	[ 6] = { .submit = 44, .complete =  99},
	[ 7] = { .submit = 46, .complete = 100},
	[ 8] = { .submit = 48, .complete = 101},
	[ 9] = { .submit = 50, .complete = 102},
	[10] = { .submit = 52, .complete = 103},
	[11] = { .submit = 54, .complete = 104},
	[12] = { .submit = 56, .complete = 105},
	[13] = { .submit = 58, .complete = 106},
	[14] = { .submit = 60, .complete = 107},

	/* USB1 ENDP1 */
	[15] = { .submit = 62, .complete = 125},
	[16] = { .submit = 64, .complete = 126},
	[17] = { .submit = 66, .complete = 127},
	[18] = { .submit = 68, .complete = 128},
	[19] = { .submit = 70, .complete = 129},
	[20] = { .submit = 72, .complete = 130},
	[21] = { .submit = 74, .complete = 131},
	[22] = { .submit = 76, .complete = 132},
	[23] = { .submit = 78, .complete = 133},
	[24] = { .submit = 80, .complete = 134},
	[25] = { .submit = 82, .complete = 135},
	[26] = { .submit = 84, .complete = 136},
	[27] = { .submit = 86, .complete = 137},
	[28] = { .submit = 88, .complete = 138},
	[29] = { .submit = 90, .complete = 139},
};

static const struct chan_queues usb_queues_rx[] = {
	/* USB0 ENDP 1 */
	[ 0] = { .submit =  1, .complete = 109},
	[ 1] = { .submit =  2, .complete = 110},
	[ 2] = { .submit =  3, .complete = 111},
	[ 3] = { .submit =  4, .complete = 112},
	[ 4] = { .submit =  5, .complete = 113},
	[ 5] = { .submit =  6, .complete = 114},
	[ 6] = { .submit =  7, .complete = 115},
	[ 7] = { .submit =  8, .complete = 116},
	[ 8] = { .submit =  9, .complete = 117},
	[ 9] = { .submit = 10, .complete = 118},
	[10] = { .submit = 11, .complete = 119},
	[11] = { .submit = 12, .complete = 120},
	[12] = { .submit = 13, .complete = 121},
	[13] = { .submit = 14, .complete = 122},
	[14] = { .submit = 15, .complete = 123},

	/* USB1 ENDP 1 */
	[15] = { .submit = 16, .complete = 141},
	[16] = { .submit = 17, .complete = 142},
	[17] = { .submit = 18, .complete = 143},
	[18] = { .submit = 19, .complete = 144},
	[19] = { .submit = 20, .complete = 145},
	[20] = { .submit = 21, .complete = 146},
	[21] = { .submit = 22, .complete = 147},
	[22] = { .submit = 23, .complete = 148},
	[23] = { .submit = 24, .complete = 149},
	[24] = { .submit = 25, .complete = 150},
	[25] = { .submit = 26, .complete = 151},
	[26] = { .submit = 27, .complete = 152},
	[27] = { .submit = 28, .complete = 153},
	[28] = { .submit = 29, .complete = 154},
	[29] = { .submit = 30, .complete = 155},
};

struct cppi_glue_infos {
	irqreturn_t (*isr)(int irq, void *data);
	const struct chan_queues *queues_rx;
	const struct chan_queues *queues_tx;
	struct chan_queues td_queue;
};

static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
{
	return container_of(c, struct cppi41_channel, chan);
}
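
/*
 * Map a descriptor address popped from a completion queue back to the
 * channel that owns it, clearing the busy slot so the channel can be
 * reused. Returns NULL if the address does not fall within this driver's
 * descriptor memory.
 */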
static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
{
	struct cppi41_channel *c;
	u32 descs_size;
	u32 desc_num;

	descs_size = sizeof(struct cppi41_desc) * ALLOC_DESCS_NUM;

	if (!((desc >= cdd->descs_phys) &&
			(desc < (cdd->descs_phys + descs_size)))) {
		return NULL;
	}

	desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc);
	BUG_ON(desc_num >= ALLOC_DESCS_NUM);
	c = cdd->chan_busy[desc_num];
	cdd->chan_busy[desc_num] = NULL;
	return c;
}

static void cppi_writel(u32 val, void __iomem *mem)
{
	__raw_writel(val, mem);
}

static u32 cppi_readl(void __iomem *mem)
{
	return __raw_readl(mem);
}

static u32 pd_trans_len(u32 val)
{
	return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
}
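
/*
 * Pop one descriptor address off a queue. The low five bits of the queue D
 * register carry the descriptor size hint, not address bits, so they are
 * masked off; a result of 0 means the queue was empty.
 */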
static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
{
	u32 desc;

	desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
	desc &= ~0x1f;
	return desc;
}
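
/*
 * Completion ISR: acknowledge the USBSS packet-completion interrupt, then
 * scan the queue manager's pending registers. Each set bit names a
 * non-empty queue; queues below FIRST_COMPLETION_QUEUE share the first
 * pending register with submit queues and are masked out.
 */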
static irqreturn_t cppi41_irq(int irq, void *data)
{
	struct cppi41_dd *cdd = data;
	struct cppi41_channel *c;
	u32 status;
	int i;

	status = cppi_readl(cdd->usbss_mem + USBSS_IRQ_STATUS);
	if (!(status & USBSS_IRQ_PD_COMP))
		return IRQ_NONE;
	cppi_writel(status, cdd->usbss_mem + USBSS_IRQ_STATUS);

	for (i = QMGR_PENDING_SLOT_Q(FIRST_COMPLETION_QUEUE); i < QMGR_NUM_PEND;
			i++) {
		u32 val;
		u32 q_num;

		val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
		if (i == QMGR_PENDING_SLOT_Q(FIRST_COMPLETION_QUEUE) && val) {
			u32 mask;

			/* set the bit for completion queue 93 */
			mask = 1 << QMGR_PENDING_BIT_Q(FIRST_COMPLETION_QUEUE);
			/* turn it into a mask of all bits below queue 93 */
			mask--;
			/* clear those bits, keeping only queue 93 and above */
			val &= ~mask;
		}
		if (val)
			__iormb();

		while (val) {
			u32 desc, len;
			int error;

			error = pm_runtime_get(cdd->ddev.dev);
			if (error < 0)
				dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
					__func__, error);

			q_num = __fls(val);
			val &= ~(1 << q_num);
			q_num += 32 * i;
			desc = cppi41_pop_desc(cdd, q_num);
			c = desc_to_chan(cdd, desc);
			if (WARN_ON(!c)) {
				pr_err("%s() q %d desc %08x\n", __func__,
						q_num, desc);
				continue;
			}

			if (c->desc->pd2 & PD2_ZERO_LENGTH)
				len = 0;
			else
				len = pd_trans_len(c->desc->pd0);

			c->residue = pd_trans_len(c->desc->pd6) - len;
			dma_cookie_complete(&c->txd);
			dmaengine_desc_get_callback_invoke(&c->txd, NULL);

			pm_runtime_mark_last_busy(cdd->ddev.dev);
			pm_runtime_put_autosuspend(cdd->ddev.dev);
		}
	}
	return IRQ_HANDLED;
}
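
/*
 * tx_submit only hands out a cookie; the descriptor is pushed to the
 * hardware later, from issue_pending, as the dmaengine API permits.
 */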
static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
{
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);

	return cookie;
}

static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	int error;

	error = pm_runtime_get_sync(cdd->ddev.dev);
	if (error < 0) {
		dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
			__func__, error);
		pm_runtime_put_noidle(cdd->ddev.dev);

		return error;
	}

	dma_cookie_init(chan);
	dma_async_tx_descriptor_init(&c->txd, chan);
	c->txd.tx_submit = cppi41_tx_submit;

	if (!c->is_tx)
		cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);

	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);

	return 0;
}

static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	int error;

	error = pm_runtime_get_sync(cdd->ddev.dev);
	if (error < 0) {
		pm_runtime_put_noidle(cdd->ddev.dev);

		return;
	}

	WARN_ON(!list_empty(&cdd->pending));

	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);
}

static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	enum dma_status ret;

	/* lock */
	ret = dma_cookie_status(chan, cookie, txstate);
	if (txstate && ret == DMA_COMPLETE)
		txstate->residue = c->residue;
	/* unlock */

	return ret;
}
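
/*
 * Arm the channel and push its descriptor. The queue D word packs the
 * descriptor size hint into bits 4:0 ((size - 24) / 4) and the 32-byte
 * aligned descriptor address into the remaining bits.
 */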
static void push_desc_queue(struct cppi41_channel *c)
{
	struct cppi41_dd *cdd = c->cdd;
	u32 desc_num;
	u32 desc_phys;
	u32 reg;

	c->residue = 0;

	reg = GCR_CHAN_ENABLE;
	if (!c->is_tx) {
		reg |= GCR_STARV_RETRY;
		reg |= GCR_DESC_TYPE_HOST;
		reg |= c->q_comp_num;
	}

	cppi_writel(reg, c->gcr_reg);

	/*
	 * We don't use writel() but __raw_writel() so we have to make sure
	 * that the DMA descriptor in coherent memory has made it to main
	 * memory before starting the dma engine.
	 */
	__iowmb();

	desc_phys = lower_32_bits(c->desc_phys);
	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
	WARN_ON(cdd->chan_busy[desc_num]);
	cdd->chan_busy[desc_num] = c;

	reg = (sizeof(struct cppi41_desc) - 24) / 4;
	reg |= desc_phys;
	cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
}

static void pending_desc(struct cppi41_channel *c)
{
	struct cppi41_dd *cdd = c->cdd;
	unsigned long flags;

	spin_lock_irqsave(&cdd->lock, flags);
	list_add_tail(&c->node, &cdd->pending);
	spin_unlock_irqrestore(&cdd->lock, flags);
}

static void cppi41_dma_issue_pending(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	int error;

	error = pm_runtime_get(cdd->ddev.dev);
	if ((error != -EINPROGRESS) && error < 0) {
		pm_runtime_put_noidle(cdd->ddev.dev);
		dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
			error);

		return;
	}

	if (likely(pm_runtime_active(cdd->ddev.dev)))
		push_desc_queue(c);
	else
		pending_desc(c);

	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);
}

static u32 get_host_pd0(u32 length)
{
	u32 reg;

	reg = DESC_TYPE_HOST << DESC_TYPE;
	reg |= length;

	return reg;
}

static u32 get_host_pd1(struct cppi41_channel *c)
{
	u32 reg;

	reg = 0;

	return reg;
}

static u32 get_host_pd2(struct cppi41_channel *c)
{
	u32 reg;

	reg = DESC_TYPE_USB;
	reg |= c->q_comp_num;

	return reg;
}

static u32 get_host_pd3(u32 length)
{
	u32 reg;

	/* PD3 = packet size */
	reg = length;

	return reg;
}

static u32 get_host_pd6(u32 length)
{
	u32 reg;

	/* PD6 buffer size */
	reg = DESC_PD_COMPLETE;
	reg |= length;

	return reg;
}

static u32 get_host_pd4_or_7(u32 addr)
{
	u32 reg;

	reg = addr;

	return reg;
}

static u32 get_host_pd5(void)
{
	u32 reg;

	reg = 0;

	return reg;
}
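
/*
 * Build host packet descriptors for the scatterlist. Each channel owns
 * exactly one pre-allocated descriptor, so until the MUSB side grows
 * scatter/gather support only single-entry lists really work here.
 */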
static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_desc *d;
	struct scatterlist *sg;
	unsigned int i;

	d = c->desc;
	for_each_sg(sgl, sg, sg_len, i) {
		u32 addr;
		u32 len;

		/* We need to use more than one desc once musb supports sg */
		addr = lower_32_bits(sg_dma_address(sg));
		len = sg_dma_len(sg);

		d->pd0 = get_host_pd0(len);
		d->pd1 = get_host_pd1(c);
		d->pd2 = get_host_pd2(c);
		d->pd3 = get_host_pd3(len);
		d->pd4 = get_host_pd4_or_7(addr);
		d->pd5 = get_host_pd5();
		d->pd6 = get_host_pd6(len);
		d->pd7 = get_host_pd4_or_7(addr);

		d++;
	}

	return &c->txd;
}

static void cppi41_compute_td_desc(struct cppi41_desc *d)
{
	d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
}
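
/*
 * Tearing a channel down is a small state machine driven by repeated
 * calls: first queue a teardown descriptor and set the channel's teardown
 * bit (td_queued), then keep popping the completion queues until both the
 * channel's own transfer descriptor (td_desc_seen) and the teardown
 * descriptor (td_seen) have come back, returning -EAGAIN until then.
 */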
static int cppi41_tear_down_chan(struct cppi41_channel *c)
{
	struct cppi41_dd *cdd = c->cdd;
	struct cppi41_desc *td;
	u32 reg;
	u32 desc_phys;
	u32 td_desc_phys;

	td = cdd->cd;
	td += cdd->first_td_desc;

	td_desc_phys = cdd->descs_phys;
	td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc);

	if (!c->td_queued) {
		cppi41_compute_td_desc(td);
		__iowmb();

		reg = (sizeof(struct cppi41_desc) - 24) / 4;
		reg |= td_desc_phys;
		cppi_writel(reg, cdd->qmgr_mem +
				QMGR_QUEUE_D(cdd->td_queue.submit));

		reg = GCR_CHAN_ENABLE;
		if (!c->is_tx) {
			reg |= GCR_STARV_RETRY;
			reg |= GCR_DESC_TYPE_HOST;
			reg |= c->q_comp_num;
		}
		reg |= GCR_TEARDOWN;
		cppi_writel(reg, c->gcr_reg);
		c->td_queued = 1;
		c->td_retry = 500;
	}

	if (!c->td_seen || !c->td_desc_seen) {

		desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
		if (!desc_phys)
			desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);

		if (desc_phys == c->desc_phys) {
			c->td_desc_seen = 1;

		} else if (desc_phys == td_desc_phys) {
			u32 pd0;

			__iormb();
			pd0 = td->pd0;
			WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
			WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
			WARN_ON((pd0 & 0x1f) != c->port_num);
			c->td_seen = 1;
		} else if (desc_phys) {
			WARN_ON_ONCE(1);
		}
	}
	c->td_retry--;
	/*
	 * If the TX descriptor / channel is in use, the caller needs to poke
	 * its TD bit multiple times. After that the hardware releases the
	 * transfer descriptor followed by the TD descriptor. Waiting seems
	 * not to make any difference.
	 * RX seems to be thrown out right away. However once the TearDown
	 * descriptor gets through we are done. If the TD comes back before
	 * the transfer descriptor, we fetch the latter from the submit
	 * queue, where it has to be waiting for us.
	 */
	if (!c->td_seen && c->td_retry) {
		udelay(1);
		return -EAGAIN;
	}

	WARN_ON(!c->td_retry);
	if (!c->td_desc_seen) {
		desc_phys = cppi41_pop_desc(cdd, c->q_num);
		if (!desc_phys)
			desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
		WARN_ON(!desc_phys);
	}

	c->td_queued = 0;
	c->td_seen = 0;
	c->td_desc_seen = 0;
	cppi_writel(0, c->gcr_reg);
	return 0;
}

static int cppi41_stop_chan(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	u32 desc_num;
	u32 desc_phys;
	int ret;

	desc_phys = lower_32_bits(c->desc_phys);
	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
	if (!cdd->chan_busy[desc_num])
		return 0;

	ret = cppi41_tear_down_chan(c);
	if (ret)
		return ret;

	WARN_ON(!cdd->chan_busy[desc_num]);
	cdd->chan_busy[desc_num] = NULL;

	return 0;
}

static void cleanup_chans(struct cppi41_dd *cdd)
{
	while (!list_empty(&cdd->ddev.channels)) {
		struct cppi41_channel *cchan;

		cchan = list_first_entry(&cdd->ddev.channels,
				struct cppi41_channel, chan.device_node);
		list_del(&cchan->chan.device_node);
		kfree(cchan);
	}
}

static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
{
	struct cppi41_channel *cchan;
	int i;
	int ret;
	u32 n_chans;

	ret = of_property_read_u32(dev->of_node, "#dma-channels",
			&n_chans);
	if (ret)
		return ret;
	/*
	 * Each hardware channel can only be used as TX or as RX, and USB
	 * needs both directions, so register twice as many dma channels.
	 */
	n_chans *= 2;

	for (i = 0; i < n_chans; i++) {
		cchan = kzalloc(sizeof(*cchan), GFP_KERNEL);
		if (!cchan)
			goto err;

		cchan->cdd = cdd;
		if (i & 1) {
			cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1);
			cchan->is_tx = 1;
		} else {
			cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1);
			cchan->is_tx = 0;
		}
		cchan->port_num = i >> 1;
		cchan->desc = &cdd->cd[i];
		cchan->desc_phys = cdd->descs_phys;
		cchan->desc_phys += i * sizeof(struct cppi41_desc);
		cchan->chan.device = &cdd->ddev;
		list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels);
	}
	cdd->first_td_desc = n_chans;

	return 0;
err:
	cleanup_chans(cdd);
	return -ENOMEM;
}

static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
{
	unsigned int mem_descs;
	int i;

	mem_descs = ALLOC_DESCS_NUM * sizeof(struct cppi41_desc);

	for (i = 0; i < DESCS_AREAS; i++) {

		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));

		dma_free_coherent(dev, mem_descs, cdd->cd,
				cdd->descs_phys);
	}
}

static void disable_sched(struct cppi41_dd *cdd)
{
	cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
}

static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
	disable_sched(cdd);

	purge_descs(dev, cdd);

	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
	dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
			cdd->scratch_phys);
}

static int init_descs(struct device *dev, struct cppi41_dd *cdd)
{
	unsigned int desc_size;
	unsigned int mem_descs;
	int i;
	u32 reg;
	u32 idx;

	BUILD_BUG_ON(sizeof(struct cppi41_desc) &
			(sizeof(struct cppi41_desc) - 1));
	BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32);
	BUILD_BUG_ON(ALLOC_DESCS_NUM < 32);

	desc_size = sizeof(struct cppi41_desc);
	mem_descs = ALLOC_DESCS_NUM * desc_size;

	idx = 0;
	for (i = 0; i < DESCS_AREAS; i++) {

		reg = idx << QMGR_MEMCTRL_IDX_SH;
		reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH;
		reg |= ilog2(ALLOC_DESCS_NUM) - 5;

		BUILD_BUG_ON(DESCS_AREAS != 1);
		cdd->cd = dma_alloc_coherent(dev, mem_descs,
				&cdd->descs_phys, GFP_KERNEL);
		if (!cdd->cd)
			return -ENOMEM;

		cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
		cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i));

		idx += ALLOC_DESCS_NUM;
	}
	return 0;
}
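
/*
 * Each 32-bit scheduler word holds four one-byte entries: a channel
 * number in the low bits plus an IS_RX flag in the top bit of each byte.
 * The loop below thus schedules two channels per word, each once for TX
 * and once for RX, and the control register takes the index of the last
 * valid entry together with the enable bit.
 */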
static void init_sched(struct cppi41_dd *cdd)
{
	unsigned ch;
	unsigned word;
	u32 reg;

	word = 0;
	cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
	for (ch = 0; ch < 15 * 2; ch += 2) {

		reg = SCHED_ENTRY0_CHAN(ch);
		reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;

		reg |= SCHED_ENTRY2_CHAN(ch + 1);
		reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX;
		cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
		word++;
	}
	reg = 15 * 2 * 2 - 1;
	reg |= DMA_SCHED_CTRL_EN;
	cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
}
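
/*
 * The queue manager needs one 4-byte linking-RAM slot per descriptor,
 * hence the QMGR_SCRATCH_SIZE allocation (TOTAL_DESCS_NUM * 4) that is
 * handed to it as LRAM0 below; LRAM1 is left unused.
 */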
static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
	int ret;

	BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
	cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
			&cdd->scratch_phys, GFP_KERNEL);
	if (!cdd->qmgr_scratch)
		return -ENOMEM;

	cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);

	ret = init_descs(dev, cdd);
	if (ret)
		goto err_td;

	cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
	init_sched(cdd);
	return 0;
err_td:
	deinit_cppi41(dev, cdd);
	return ret;
}

static struct platform_driver cpp41_dma_driver;
/*
 * The param format is:
 * X Y
 * X: Port
 * Y: 0 = RX else TX
 */
#define INFO_PORT	0
#define INFO_IS_TX	1

static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct cppi41_channel *cchan;
	struct cppi41_dd *cdd;
	const struct chan_queues *queues;
	u32 *num = param;

	if (chan->device->dev->driver != &cpp41_dma_driver.driver)
		return false;

	cchan = to_cpp41_chan(chan);

	if (cchan->port_num != num[INFO_PORT])
		return false;

	if (cchan->is_tx && !num[INFO_IS_TX])
		return false;
	cdd = cchan->cdd;
	if (cchan->is_tx)
		queues = cdd->queues_tx;
	else
		queues = cdd->queues_rx;

	BUILD_BUG_ON(ARRAY_SIZE(usb_queues_rx) != ARRAY_SIZE(usb_queues_tx));
	if (WARN_ON(cchan->port_num >= ARRAY_SIZE(usb_queues_rx)))
		return false;

	cchan->q_num = queues[cchan->port_num].submit;
	cchan->q_comp_num = queues[cchan->port_num].complete;
	return true;
}

static struct of_dma_filter_info cpp41_dma_info = {
	.filter_fn = cpp41_dma_filter_fn,
};

static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct of_dma_filter_info *info = ofdma->of_dma_data;

	if (!info || !info->filter_fn)
		return NULL;

	if (count != 2)
		return NULL;

	return dma_request_channel(info->dma_cap, info->filter_fn,
			&dma_spec->args[0]);
}

static const struct cppi_glue_infos usb_infos = {
	.isr = cppi41_irq,
	.queues_rx = usb_queues_rx,
	.queues_tx = usb_queues_tx,
	.td_queue = { .submit = 31, .complete = 0 },
};

static const struct of_device_id cppi41_dma_ids[] = {
	{ .compatible = "ti,am3359-cppi41", .data = &usb_infos},
	{},
};
MODULE_DEVICE_TABLE(of, cppi41_dma_ids);

static const struct cppi_glue_infos *get_glue_info(struct device *dev)
{
	const struct of_device_id *of_id;

	of_id = of_match_node(cppi41_dma_ids, dev->of_node);
	if (!of_id)
		return NULL;
	return of_id->data;
}

#define CPPI41_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static int cppi41_dma_probe(struct platform_device *pdev)
{
	struct cppi41_dd *cdd;
	struct device *dev = &pdev->dev;
	const struct cppi_glue_infos *glue_info;
	int irq;
	int ret;

	glue_info = get_glue_info(dev);
	if (!glue_info)
		return -EINVAL;

	cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL);
	if (!cdd)
		return -ENOMEM;

	dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask);
	cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources;
	cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources;
	cdd->ddev.device_tx_status = cppi41_dma_tx_status;
	cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
	cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
	cdd->ddev.device_terminate_all = cppi41_stop_chan;
	cdd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	cdd->ddev.src_addr_widths = CPPI41_DMA_BUSWIDTHS;
	cdd->ddev.dst_addr_widths = CPPI41_DMA_BUSWIDTHS;
	cdd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	cdd->ddev.dev = dev;
	INIT_LIST_HEAD(&cdd->ddev.channels);
	cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;

	cdd->usbss_mem = of_iomap(dev->of_node, 0);
	cdd->ctrl_mem = of_iomap(dev->of_node, 1);
	cdd->sched_mem = of_iomap(dev->of_node, 2);
	cdd->qmgr_mem = of_iomap(dev->of_node, 3);
	spin_lock_init(&cdd->lock);
	INIT_LIST_HEAD(&cdd->pending);

	platform_set_drvdata(pdev, cdd);

	if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
			!cdd->qmgr_mem)
		return -ENXIO;

	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, 100);
	pm_runtime_use_autosuspend(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_get_sync;

	cdd->queues_rx = glue_info->queues_rx;
	cdd->queues_tx = glue_info->queues_tx;
	cdd->td_queue = glue_info->td_queue;

	ret = init_cppi41(dev, cdd);
	if (ret)
		goto err_init_cppi;

	ret = cppi41_add_chans(dev, cdd);
	if (ret)
		goto err_chans;

	irq = irq_of_parse_and_map(dev->of_node, 0);
	if (!irq) {
		ret = -EINVAL;
		goto err_irq;
	}

	cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);

	ret = devm_request_irq(&pdev->dev, irq, glue_info->isr, IRQF_SHARED,
			dev_name(dev), cdd);
	if (ret)
		goto err_irq;
	cdd->irq = irq;

	ret = dma_async_device_register(&cdd->ddev);
	if (ret)
		goto err_dma_reg;

	ret = of_dma_controller_register(dev->of_node,
			cppi41_dma_xlate, &cpp41_dma_info);
	if (ret)
		goto err_of;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
err_of:
	dma_async_device_unregister(&cdd->ddev);
err_dma_reg:
err_irq:
	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
	cleanup_chans(cdd);
err_chans:
	deinit_cppi41(dev, cdd);
err_init_cppi:
	pm_runtime_dont_use_autosuspend(dev);
err_get_sync:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	iounmap(cdd->usbss_mem);
	iounmap(cdd->ctrl_mem);
	iounmap(cdd->sched_mem);
	iounmap(cdd->qmgr_mem);
	return ret;
}

static int cppi41_dma_remove(struct platform_device *pdev)
{
	struct cppi41_dd *cdd = platform_get_drvdata(pdev);
	int error;

	error = pm_runtime_get_sync(&pdev->dev);
	if (error < 0)
		dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n",
			__func__, error);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&cdd->ddev);

	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
	devm_free_irq(&pdev->dev, cdd->irq, cdd);
	cleanup_chans(cdd);
	deinit_cppi41(&pdev->dev, cdd);
	iounmap(cdd->usbss_mem);
	iounmap(cdd->ctrl_mem);
	iounmap(cdd->sched_mem);
	iounmap(cdd->qmgr_mem);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static int __maybe_unused cppi41_suspend(struct device *dev)
{
	struct cppi41_dd *cdd = dev_get_drvdata(dev);

	cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ);
	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
	disable_sched(cdd);

	return 0;
}

static int __maybe_unused cppi41_resume(struct device *dev)
{
	struct cppi41_dd *cdd = dev_get_drvdata(dev);
	struct cppi41_channel *c;
	int i;

	for (i = 0; i < DESCS_AREAS; i++)
		cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));

	list_for_each_entry(c, &cdd->ddev.channels, chan.device_node)
		if (!c->is_tx)
			cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);

	init_sched(cdd);

	cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ);
	cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);

	cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);

	return 0;
}

static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
{
	struct cppi41_dd *cdd = dev_get_drvdata(dev);

	WARN_ON(!list_empty(&cdd->pending));

	return 0;
}
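
/*
 * Descriptors queued through issue_pending while the device was runtime
 * suspended sit on cdd->pending; push them to the hardware now that the
 * device is powered again.
 */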
static int __maybe_unused cppi41_runtime_resume(struct device *dev)
{
	struct cppi41_dd *cdd = dev_get_drvdata(dev);
	struct cppi41_channel *c, *_c;
	unsigned long flags;

	spin_lock_irqsave(&cdd->lock, flags);
	list_for_each_entry_safe(c, _c, &cdd->pending, node) {
		push_desc_queue(c);
		list_del(&c->node);
	}
	spin_unlock_irqrestore(&cdd->lock, flags);

	return 0;
}

static const struct dev_pm_ops cppi41_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(cppi41_suspend, cppi41_resume)
	SET_RUNTIME_PM_OPS(cppi41_runtime_suspend,
			cppi41_runtime_resume,
			NULL)
};

static struct platform_driver cpp41_dma_driver = {
	.probe = cppi41_dma_probe,
	.remove = cppi41_dma_remove,
	.driver = {
		.name = "cppi41-dma-engine",
		.pm = &cppi41_pm_ops,
		.of_match_table = of_match_ptr(cppi41_dma_ids),
	},
};

module_platform_driver(cpp41_dma_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");