musb_cppi41.c

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include "musb_core.h"

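/*
 * Per-endpoint register offsets and mode values for the TI CPPI 4.1 DMA
 * glue: RNDIS_REG() yields the RNDIS size register for a given endpoint
 * (4 bytes per endpoint, starting at 0x80). The EP_MODE_DMA_* values are
 * written into USB_CTRL_TX_MODE/USB_CTRL_RX_MODE and the EP_MODE_AUTOREG_*
 * values into USB_CTRL_AUTOREQ, two bits per endpoint.
 */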
#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREG_NONE		0
#define EP_MODE_AUTOREG_ALL_NEOP	1
#define EP_MODE_AUTOREG_ALWAYS		3

#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

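/*
 * Per-direction channel state, one instance per hardware endpoint and
 * direction. buf_addr/total_len/prog_len/transferred track the progress of
 * a request that may be split into per-packet DMA descriptors; tx_check
 * links the channel into the controller's early_tx_list while waiting for
 * the TX FIFO to drain.
 */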
struct cppi41_dma_channel {
	struct dma_channel channel;
	struct cppi41_dma_controller *controller;
	struct musb_hw_ep *hw_ep;
	struct dma_chan *dc;
	dma_cookie_t cookie;
	u8 port_num;
	u8 is_tx;
	u8 is_allocated;
	u8 usb_toggle;

	dma_addr_t buf_addr;
	u32 total_len;
	u32 prog_len;
	u32 transferred;
	u32 packet_sz;
	struct list_head tx_check;
};

#define MUSB_DMA_NUM_CHANNELS 15

struct cppi41_dma_controller {
	struct dma_controller controller;
	struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
	struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
	struct musb *musb;
	struct hrtimer early_tx;
	struct list_head early_tx_list;
	u32 rx_mode;
	u32 tx_mode;
	u32 auto_req;
};

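/*
 * AM335x Advisory 1.0.13 workaround: save_rx_toggle() records the host-side
 * RX data toggle before a transfer is started and update_rx_toggle()
 * restores DATA1 after completion if the toggle was spuriously reset.
 */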
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: Due to an internal synchronisation error,
	 * the data toggle may reset from DATA1 to DATA0 while receiving data
	 * from more than one endpoint.
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		dev_dbg(cppi41_channel->controller->musb->controller,
				"Restoring DATA1 toggle.\n");
	}

	cppi41_channel->usb_toggle = toggle;
}

static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
	u8 epnum = hw_ep->epnum;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = musb->endpoints[epnum].regs;
	u16 csr;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (csr & MUSB_TXCSR_TXPKTRDY)
		return false;
	return true;
}

static void cppi41_dma_callback(void *private_data);

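/*
 * Complete one DMA descriptor. When prog_len has reached zero the whole
 * request is done and the MUSB core is notified; otherwise the next
 * packet-sized chunk is queued (Advisory 1.0.13 limits such transfers to
 * one packet per descriptor) and, for RX endpoints, REQPKT is set again to
 * request the next packet.
 */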
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;

	if (!cppi41_channel->prog_len) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u16 csr;
		u32 remain_bytes;
		void __iomem *epio = cppi41_channel->hw_ep->regs;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;
		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}

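/*
 * hrtimer handler for the full-speed early-TX workaround: complete every
 * queued channel whose TX FIFO has drained in the meantime and re-arm the
 * timer (150us) as long as any channel is still waiting.
 */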
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	if (!list_empty(&controller->early_tx_list)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx,
				ktime_set(0, 150 * NSEC_PER_USEC));
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

static void cppi41_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	bool empty;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
		hw_ep->epnum, cppi41_channel->transferred,
		cppi41_channel->total_len);

	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;

	empty = musb_is_tx_fifo_empty(hw_ep);
	if (empty) {
		cppi41_trans_done(cppi41_channel);
	} else {
		struct cppi41_dma_controller *controller;
		/*
		 * On AM335x it has been observed that the TX interrupt fires
		 * too early, i.e. the TX FIFO is not yet empty although the
		 * DMA engine reports that it is done with the transfer. We
		 * don't receive a FIFO-empty interrupt, so the only thing we
		 * can do is poll for the bit. On HS it usually takes 2us, on
		 * FS around 110us - 150us depending on the transfer size.
		 * We spin on HS (no longer than 25us) and set up a timer on
		 * FS to check for the bit and complete the transfer.
		 */
		controller = cppi41_channel->controller;

		if (musb->g.speed == USB_SPEED_HIGH) {
			unsigned wait = 25;

			do {
				empty = musb_is_tx_fifo_empty(hw_ep);
				if (empty)
					break;
				wait--;
				if (!wait)
					break;
				udelay(1);
			} while (1);

			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
		}
		list_add_tail(&cppi41_channel->tx_check,
				&controller->early_tx_list);
		if (!hrtimer_active(&controller->early_tx)) {
			hrtimer_start_range_ns(&controller->early_tx,
				ktime_set(0, 140 * NSEC_PER_USEC),
				40 * NSEC_PER_USEC,
				HRTIMER_MODE_REL);
		}
	}
out:
	spin_unlock_irqrestore(&musb->lock, flags);
}

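/*
 * The TX/RX mode and auto-request registers hold a two-bit field per
 * endpoint (endpoint 1 in bits 1:0, endpoint 2 in bits 3:2, ...): clear
 * this endpoint's field and merge in the new mode.
 */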
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	unsigned shift;

	shift = (ep - 1) * 2;
	old &= ~(3 << shift);
	old |= mode << shift;
	return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	if (cppi41_channel->is_tx)
		old_mode = controller->tx_mode;
	else
		old_mode = controller->rx_mode;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	if (cppi41_channel->is_tx) {
		controller->tx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
				new_mode);
	} else {
		controller->rx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
				new_mode);
	}
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->auto_req;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	controller->auto_req = new_mode;
	musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
}

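/*
 * Program a transfer on the dmaengine channel. TX transfers larger than
 * one packet use generic RNDIS mode so the hardware segments the buffer;
 * single-packet TX and all RX use transparent mode, with RX additionally
 * clamped to one packet per descriptor (see Advisory 1.0.13 below).
 */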
static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->musb;
	unsigned use_gen_rndis = 0;

	dev_dbg(musb->controller,
		"configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
		cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
		packet_sz, mode, (unsigned long long) dma_addr,
		len, cppi41_channel->is_tx);

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;

	/*
	 * Due to AM335x Advisory 1.0.13 we are not allowed to transfer more
	 * than max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_NONE);
		}
	} else {
		/* fallback mode */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREG_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}

static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);
	struct cppi41_dma_channel *cppi41_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;

	if (ch_num >= MUSB_DMA_NUM_CHANNELS)
		return NULL;

	if (is_tx)
		cppi41_channel = &controller->tx_channel[ch_num];
	else
		cppi41_channel = &controller->rx_channel[ch_num];

	if (!cppi41_channel->dc)
		return NULL;

	if (cppi41_channel->is_allocated)
		return NULL;

	cppi41_channel->hw_ep = hw_ep;
	cppi41_channel->is_allocated = 1;

	return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;

	if (cppi41_channel->is_allocated) {
		cppi41_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}

static int cppi41_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;
	ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

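/*
 * Gadget-side check whether a request may use DMA: only bulk TX endpoints
 * qualify, since Advisory 1.0.13 has no workaround for device-mode RX.
 * Host mode is not expected to call this.
 */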
static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
		void *buf, u32 length)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;

	if (is_host_active(musb)) {
		WARN_ON(1);
		return 1;
	}
	if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
		return 0;
	if (cppi41_channel->is_tx)
		return 1;
	/* AM335x Advisory 1.0.13. No workaround for device RX mode */
	return 0;
}

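/*
 * Tear down a programmed transfer: disable DMA requests on the endpoint,
 * flush a pending RX packet if one already arrived, then keep asserting
 * the teardown bit until dmaengine_terminate_all() stops returning
 * -EAGAIN, and finally flush a stale TX packet from the FIFO.
 */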
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
			cppi41_channel->port_num, is_tx);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
	struct dma_chan *dc;
	int i;

	for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
		dc = ctrl->tx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
		dc = ctrl->rx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
	}
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}

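/*
 * Parse the "dma-names" DT property (entries of the form "tx1", "rx2",
 * ...) and request the matching dmaengine channel for each one. A board's
 * DT is expected to look roughly like the sketch below (an illustrative
 * example only; the exact specifiers depend on the cppi41 binding):
 *
 *	dmas = <&cppi41dma 0 0>, <&cppi41dma 1 0>;
 *	dma-names = "rx1", "tx1";
 */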
static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
	struct musb *musb = controller->musb;
	struct device *dev = musb->controller;
	struct device_node *np = dev->of_node;
	struct cppi41_dma_channel *cppi41_channel;
	int count;
	int i;
	int ret;

	count = of_property_count_strings(np, "dma-names");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		struct dma_chan *dc;
		struct dma_channel *musb_dma;
		const char *str;
		unsigned is_tx;
		unsigned int port;

		ret = of_property_read_string_index(np, "dma-names", i, &str);
		if (ret)
			goto err;
		if (!strncmp(str, "tx", 2))
			is_tx = 1;
		else if (!strncmp(str, "rx", 2))
			is_tx = 0;
		else {
			dev_err(dev, "Wrong dmatype %s\n", str);
			ret = -EINVAL;
			goto err;
		}
		ret = kstrtouint(str + 2, 0, &port);
		if (ret)
			goto err;

		ret = -EINVAL;
		if (port > MUSB_DMA_NUM_CHANNELS || !port)
			goto err;
		if (is_tx)
			cppi41_channel = &controller->tx_channel[port - 1];
		else
			cppi41_channel = &controller->rx_channel[port - 1];

		cppi41_channel->controller = controller;
		cppi41_channel->port_num = port;
		cppi41_channel->is_tx = is_tx;
		INIT_LIST_HEAD(&cppi41_channel->tx_check);

		musb_dma = &cppi41_channel->channel;
		musb_dma->private_data = cppi41_channel;
		musb_dma->status = MUSB_DMA_STATUS_FREE;
		musb_dma->max_len = SZ_4M;

		dc = dma_request_slave_channel(dev, str);
		if (!dc) {
			dev_err(dev, "Failed to request %s.\n", str);
			ret = -EPROBE_DEFER;
			goto err;
		}
		cppi41_channel->dc = dc;
	}
	return 0;
err:
	cppi41_release_all_dma_chans(controller);
	return ret;
}

void dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller);
}

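/*
 * Entry point for the MUSB core: allocate the controller, set up the
 * early-TX hrtimer and the dma_controller callbacks, and bind the DMA
 * channels. -EPROBE_DEFER is propagated via ERR_PTR() so the probe can be
 * retried once the DMA engine has been registered.
 */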
struct dma_controller *dma_controller_create(struct musb *musb,
					void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int ret = 0;

	if (!musb->controller->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);
	controller->musb = musb;

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}