fifo.c

/*
 * Renesas USB driver
 *
 * Copyright (C) 2011 Renesas Solutions Corp.
 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/scatterlist.h>
#include "common.h"
#include "pipe.h"

#define usbhsf_get_cfifo(p)	(&((p)->fifo_info.cfifo))
#define usbhsf_is_cfifo(p, f)	(usbhsf_get_cfifo(p) == f)

#define usbhsf_fifo_is_busy(f)	((f)->pipe) /* see usbhs_pipe_select_fifo */

/*
 * packet initialize
 */
void usbhs_pkt_init(struct usbhs_pkt *pkt)
{
        INIT_LIST_HEAD(&pkt->node);
}

/*
 * packet control function
 */
static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
{
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
        struct device *dev = usbhs_priv_to_dev(priv);

        dev_err(dev, "null handler\n");

        return -EINVAL;
}

static struct usbhs_pkt_handle usbhsf_null_handler = {
        .prepare = usbhsf_null_handle,
        .try_run = usbhsf_null_handle,
};
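
/*
 * usbhs_pkt_push() appends a packet to the pipe's queue under the
 * driver lock and snapshots the pipe's current handler into the packet.
 */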
void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
                    void (*done)(struct usbhs_priv *priv,
                                 struct usbhs_pkt *pkt),
                    void *buf, int len, int zero, int sequence)
{
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct device *dev = usbhs_priv_to_dev(priv);
        unsigned long flags;

        if (!done) {
                dev_err(dev, "no done function\n");
                return;
        }

        /******************** spin lock ********************/
        usbhs_lock(priv, flags);

        if (!pipe->handler) {
                dev_err(dev, "no handler function\n");
                pipe->handler = &usbhsf_null_handler;
        }

        list_move_tail(&pkt->node, &pipe->list);

        /*
         * each pkt must hold its own handler,
         * because the handler might be changed depending on the situation
         * (e.g. dma handler -> pio handler).
         */
        pkt->pipe = pipe;
        pkt->buf = buf;
        pkt->handler = pipe->handler;
        pkt->length = len;
        pkt->zero = zero;
        pkt->actual = 0;
        pkt->done = done;
        pkt->sequence = sequence;

        usbhs_unlock(priv, flags);
        /******************** spin unlock ******************/
}
static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
{
        list_del_init(&pkt->node);
}

static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
{
        if (list_empty(&pipe->list))
                return NULL;

        return list_first_entry(&pipe->list, struct usbhs_pkt, node);
}

static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
                              struct usbhs_fifo *fifo);
static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
                                 struct usbhs_fifo *fifo);
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
                                            struct usbhs_pkt *pkt);
#define usbhsf_dma_map(p)	__usbhsf_dma_map_ctrl(p, 1)
#define usbhsf_dma_unmap(p)	__usbhsf_dma_map_ctrl(p, 0)
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);

struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
{
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
        unsigned long flags;

        /******************** spin lock ********************/
        usbhs_lock(priv, flags);

        usbhs_pipe_disable(pipe);

        if (!pkt)
                pkt = __usbhsf_pkt_get(pipe);

        if (pkt) {
                struct dma_chan *chan = NULL;

                if (fifo)
                        chan = usbhsf_dma_chan_get(fifo, pkt);
                if (chan) {
                        dmaengine_terminate_all(chan);
                        usbhsf_fifo_clear(pipe, fifo);
                        usbhsf_dma_unmap(pkt);
                }

                __usbhsf_pkt_del(pkt);
        }

        if (fifo)
                usbhsf_fifo_unselect(pipe, fifo);

        usbhs_unlock(priv, flags);
        /******************** spin unlock ******************/

        return pkt;
}

enum {
        USBHSF_PKT_PREPARE,
        USBHSF_PKT_TRY_RUN,
        USBHSF_PKT_DMA_DONE,
};
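
/*
 * usbhsf_pkt_handler() takes the packet at the head of the pipe's queue
 * and runs its prepare/try_run/dma_done callback depending on @type.
 * When the callback reports completion, the packet is removed from the
 * queue, its done() callback is called and the next packet is started.
 */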
static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
{
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct usbhs_pkt *pkt;
        struct device *dev = usbhs_priv_to_dev(priv);
        int (*func)(struct usbhs_pkt *pkt, int *is_done);
        unsigned long flags;
        int ret = 0;
        int is_done = 0;

        /******************** spin lock ********************/
        usbhs_lock(priv, flags);

        pkt = __usbhsf_pkt_get(pipe);
        if (!pkt)
                goto __usbhs_pkt_handler_end;

        switch (type) {
        case USBHSF_PKT_PREPARE:
                func = pkt->handler->prepare;
                break;
        case USBHSF_PKT_TRY_RUN:
                func = pkt->handler->try_run;
                break;
        case USBHSF_PKT_DMA_DONE:
                func = pkt->handler->dma_done;
                break;
        default:
                dev_err(dev, "unknown pkt handler\n");
                goto __usbhs_pkt_handler_end;
        }

        ret = func(pkt, &is_done);

        if (is_done)
                __usbhsf_pkt_del(pkt);

__usbhs_pkt_handler_end:
        usbhs_unlock(priv, flags);
        /******************** spin unlock ******************/

        if (is_done) {
                pkt->done(priv, pkt);
                usbhs_pkt_start(pipe);
        }

        return ret;
}

void usbhs_pkt_start(struct usbhs_pipe *pipe)
{
        usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
}
/*
 * irq enable/disable function
 */
#define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e)
#define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e)
#define usbhsf_irq_callback_ctrl(pipe, status, enable) \
        ({ \
                struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); \
                struct usbhs_mod *mod = usbhs_mod_get_current(priv); \
                u16 status = (1 << usbhs_pipe_number(pipe)); \
                if (!mod) \
                        return; \
                if (enable) \
                        mod->status |= status; \
                else \
                        mod->status &= ~status; \
                usbhs_irq_callback_update(priv, mod); \
        })
static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
        /*
         * The DCP pipe can NOT use the "ready" interrupt for "send";
         * it must use the "empty" interrupt.
         * see
         *   "Operation" - "Interrupt Function" - "BRDY Interrupt"
         *
         * On the other hand, a normal pipe can use the "ready" interrupt
         * for "send" whether it is single or double buffered.
         */
        if (usbhs_pipe_is_dcp(pipe))
                usbhsf_irq_empty_ctrl(pipe, enable);
        else
                usbhsf_irq_ready_ctrl(pipe, enable);
}

static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
        usbhsf_irq_ready_ctrl(pipe, enable);
}
/*
 * FIFO ctrl
 */
static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
                                   struct usbhs_fifo *fifo)
{
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

        usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
}
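
/*
 * usbhsf_fifo_barrier() polls FRDY until the FIFO port becomes
 * accessible, giving up after roughly 10ms (1024 * 10us).
 */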
static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
                               struct usbhs_fifo *fifo)
{
        int timeout = 1024;

        do {
                /* The FIFO port is accessible */
                if (usbhs_read(priv, fifo->ctr) & FRDY)
                        return 0;

                udelay(10);
        } while (timeout--);

        return -EBUSY;
}

static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
                              struct usbhs_fifo *fifo)
{
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

        if (!usbhs_pipe_is_dcp(pipe))
                usbhsf_fifo_barrier(priv, fifo);

        usbhs_write(priv, fifo->ctr, BCLR);
}

static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
                               struct usbhs_fifo *fifo)
{
        return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
}

static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
                                 struct usbhs_fifo *fifo)
{
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

        usbhs_pipe_select_fifo(pipe, NULL);
        usbhs_write(priv, fifo->sel, 0);
}
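
/*
 * usbhsf_fifo_select() points the FIFO at the given pipe by writing
 * CURPIPE (and ISEL for DCP) into the FIFOSEL register, then waits for
 * the hardware to acknowledge the new selection.
 */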
static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
                              struct usbhs_fifo *fifo,
                              int write)
{
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct device *dev = usbhs_priv_to_dev(priv);
        int timeout = 1024;
        u16 mask = ((1 << 5) | 0xF);		/* mask of ISEL | CURPIPE */
        u16 base = usbhs_pipe_number(pipe);	/* CURPIPE */

        if (usbhs_pipe_is_busy(pipe) ||
            usbhsf_fifo_is_busy(fifo))
                return -EBUSY;

        if (usbhs_pipe_is_dcp(pipe)) {
                base |= (1 == write) << 5;	/* ISEL */

                if (usbhs_mod_is_host(priv))
                        usbhs_dcp_dir_for_host(pipe, write);
        }

        /* "base" will be used below */
        if (usbhs_get_dparam(priv, has_sudmac) && !usbhsf_is_cfifo(priv, fifo))
                usbhs_write(priv, fifo->sel, base);
        else
                usbhs_write(priv, fifo->sel, base | MBW_32);

        /* check ISEL and CURPIPE value */
        while (timeout--) {
                if (base == (mask & usbhs_read(priv, fifo->sel))) {
                        usbhs_pipe_select_fifo(pipe, fifo);
                        return 0;
                }
                udelay(10);
        }

        dev_err(dev, "fifo select error\n");

        return -EIO;
}
/*
 * DCP status stage
 */
static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
{
        struct usbhs_pipe *pipe = pkt->pipe;
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
        struct device *dev = usbhs_priv_to_dev(priv);
        int ret;

        usbhs_pipe_disable(pipe);

        ret = usbhsf_fifo_select(pipe, fifo, 1);
        if (ret < 0) {
                dev_err(dev, "%s() failed\n", __func__);
                return ret;
        }

        usbhs_pipe_sequence_data1(pipe); /* DATA1 */

        usbhsf_fifo_clear(pipe, fifo);
        usbhsf_send_terminator(pipe, fifo);

        usbhsf_fifo_unselect(pipe, fifo);

        usbhsf_tx_irq_ctrl(pipe, 1);
        usbhs_pipe_enable(pipe);

        return ret;
}

static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
{
        struct usbhs_pipe *pipe = pkt->pipe;
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
        struct device *dev = usbhs_priv_to_dev(priv);
        int ret;

        usbhs_pipe_disable(pipe);

        ret = usbhsf_fifo_select(pipe, fifo, 0);
        if (ret < 0) {
                dev_err(dev, "%s() failed\n", __func__);
                return ret;
        }

        usbhs_pipe_sequence_data1(pipe); /* DATA1 */
        usbhsf_fifo_clear(pipe, fifo);

        usbhsf_fifo_unselect(pipe, fifo);

        usbhsf_rx_irq_ctrl(pipe, 1);
        usbhs_pipe_enable(pipe);

        return ret;
}

static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
{
        struct usbhs_pipe *pipe = pkt->pipe;

        if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
                usbhsf_tx_irq_ctrl(pipe, 0);
        else
                usbhsf_rx_irq_ctrl(pipe, 0);

        pkt->actual = pkt->length;
        *is_done = 1;

        return 0;
}

struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
        .prepare = usbhs_dcp_dir_switch_to_write,
        .try_run = usbhs_dcp_dir_switch_done,
};

struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
        .prepare = usbhs_dcp_dir_switch_to_read,
        .try_run = usbhs_dcp_dir_switch_done,
};
/*
 * DCP data stage (push)
 */
static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
{
        struct usbhs_pipe *pipe = pkt->pipe;

        usbhs_pipe_sequence_data1(pipe); /* DATA1 */

        /*
         * change handler to PIO push
         */
        pkt->handler = &usbhs_fifo_pio_push_handler;

        return pkt->handler->prepare(pkt, is_done);
}

struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
        .prepare = usbhsf_dcp_data_stage_try_push,
};

/*
 * DCP data stage (pop)
 */
static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
                                             int *is_done)
{
        struct usbhs_pipe *pipe = pkt->pipe;
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);

        if (usbhs_pipe_is_busy(pipe))
                return 0;

        /*
         * prepare pop for DCP should
         *  - change DCP direction,
         *  - clear fifo
         *  - DATA1
         */
        usbhs_pipe_disable(pipe);

        usbhs_pipe_sequence_data1(pipe); /* DATA1 */

        usbhsf_fifo_select(pipe, fifo, 0);
        usbhsf_fifo_clear(pipe, fifo);
        usbhsf_fifo_unselect(pipe, fifo);

        /*
         * change handler to PIO pop
         */
        pkt->handler = &usbhs_fifo_pio_pop_handler;

        return pkt->handler->prepare(pkt, is_done);
}

struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
        .prepare = usbhsf_dcp_data_stage_prepare_pop,
};
/*
 * PIO push handler
 */
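/*
 * usbhsf_pio_try_push() copies up to one max-packet of data into the
 * CFIFO by CPU access: 32-bit writes while the buffer is aligned, then
 * byte writes for the remaining bytes.
 */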
static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
{
        struct usbhs_pipe *pipe = pkt->pipe;
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct device *dev = usbhs_priv_to_dev(priv);
        struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
        void __iomem *addr = priv->base + fifo->port;
        u8 *buf;
        int maxp = usbhs_pipe_get_maxpacket(pipe);
        int total_len;
        int i, ret, len;
        int is_short;

        usbhs_pipe_data_sequence(pipe, pkt->sequence);
        pkt->sequence = -1; /* -1 sequence will be ignored */

        usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);

        ret = usbhsf_fifo_select(pipe, fifo, 1);
        if (ret < 0)
                return 0;

        ret = usbhs_pipe_is_accessible(pipe);
        if (ret < 0) {
                /* inaccessible pipe is not an error */
                ret = 0;
                goto usbhs_fifo_write_busy;
        }

        ret = usbhsf_fifo_barrier(priv, fifo);
        if (ret < 0)
                goto usbhs_fifo_write_busy;

        buf = pkt->buf + pkt->actual;
        len = pkt->length - pkt->actual;
        len = min(len, maxp);
        total_len = len;
        is_short = total_len < maxp;

        /*
         * FIXME
         *
         * 32-bit access only
         */
        if (len >= 4 && !((unsigned long)buf & 0x03)) {
                iowrite32_rep(addr, buf, len / 4);
                len %= 4;
                buf += total_len - len;
        }

        /* the rest operation */
        for (i = 0; i < len; i++)
                iowrite8(buf[i], addr + (0x03 - (i & 0x03)));

        /*
         * variable update
         */
        pkt->actual += total_len;

        if (pkt->actual < pkt->length)
                *is_done = 0;		/* there is remaining data */
        else if (is_short)
                *is_done = 1;		/* short packet */
        else
                *is_done = !pkt->zero;	/* send zero packet? */

        /*
         * pipe/irq handling
         */
        if (is_short)
                usbhsf_send_terminator(pipe, fifo);

        usbhsf_tx_irq_ctrl(pipe, !*is_done);
        usbhs_pipe_running(pipe, !*is_done);
        usbhs_pipe_enable(pipe);

        dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n",
                usbhs_pipe_number(pipe),
                pkt->length, pkt->actual, *is_done, pkt->zero);

        usbhsf_fifo_unselect(pipe, fifo);

        return 0;

usbhs_fifo_write_busy:
        usbhsf_fifo_unselect(pipe, fifo);

        /*
         * pipe is busy.
         * retry in interrupt
         */
        usbhsf_tx_irq_ctrl(pipe, 1);
        usbhs_pipe_running(pipe, 1);

        return ret;
}

static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
{
        if (usbhs_pipe_is_running(pkt->pipe))
                return 0;

        return usbhsf_pio_try_push(pkt, is_done);
}

struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
        .prepare = usbhsf_pio_prepare_push,
        .try_run = usbhsf_pio_try_push,
};
/*
 * PIO pop handler
 */
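/*
 * usbhsf_prepare_pop() only arms the pipe for reception (data sequence,
 * transfer count, BRDY interrupt); the data is read later by the
 * try_run handler once the ready interrupt fires.
 */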
static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
{
        struct usbhs_pipe *pipe = pkt->pipe;

        if (usbhs_pipe_is_busy(pipe))
                return 0;

        if (usbhs_pipe_is_running(pipe))
                return 0;

        /*
         * enable pipe to prepare packet receive
         */
        usbhs_pipe_data_sequence(pipe, pkt->sequence);
        pkt->sequence = -1; /* -1 sequence will be ignored */

        usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
        usbhs_pipe_enable(pipe);
        usbhs_pipe_running(pipe, 1);
        usbhsf_rx_irq_ctrl(pipe, 1);

        return 0;
}

static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
        struct usbhs_pipe *pipe = pkt->pipe;
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct device *dev = usbhs_priv_to_dev(priv);
        struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
        void __iomem *addr = priv->base + fifo->port;
        u8 *buf;
        u32 data = 0;
        int maxp = usbhs_pipe_get_maxpacket(pipe);
        int rcv_len, len;
        int i, ret;
        int total_len = 0;

        ret = usbhsf_fifo_select(pipe, fifo, 0);
        if (ret < 0)
                return 0;

        ret = usbhsf_fifo_barrier(priv, fifo);
        if (ret < 0)
                goto usbhs_fifo_read_busy;

        rcv_len = usbhsf_fifo_rcv_len(priv, fifo);

        buf = pkt->buf + pkt->actual;
        len = pkt->length - pkt->actual;
        len = min(len, rcv_len);
        total_len = len;

        /*
         * update the actual length first, to decide whether to disable
         * the pipe. if this pipe keeps BUF status and all data were
         * popped, the next interrupt/token will be issued again.
         */
        pkt->actual += total_len;

        if ((pkt->actual == pkt->length) ||	/* receive all data */
            (total_len < maxp)) {		/* short packet */
                *is_done = 1;
                usbhsf_rx_irq_ctrl(pipe, 0);
                usbhs_pipe_running(pipe, 0);
                usbhs_pipe_disable(pipe);	/* disable pipe first */
        }

        /*
         * Buffer clear if Zero-Length packet
         *
         * see
         * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
         */
        if (0 == rcv_len) {
                pkt->zero = 1;
                usbhsf_fifo_clear(pipe, fifo);
                goto usbhs_fifo_read_end;
        }

        /*
         * FIXME
         *
         * 32-bit access only
         */
        if (len >= 4 && !((unsigned long)buf & 0x03)) {
                ioread32_rep(addr, buf, len / 4);
                len %= 4;
                buf += total_len - len;
        }

        /* the rest operation */
        for (i = 0; i < len; i++) {
                if (!(i & 0x03))
                        data = ioread32(addr);

                buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
        }

usbhs_fifo_read_end:
        dev_dbg(dev, " recv %d (%d/ %d/ %d/ %d)\n",
                usbhs_pipe_number(pipe),
                pkt->length, pkt->actual, *is_done, pkt->zero);

usbhs_fifo_read_busy:
        usbhsf_fifo_unselect(pipe, fifo);

        return ret;
}

struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
        .prepare = usbhsf_prepare_pop,
        .try_run = usbhsf_pio_try_pop,
};
/*
 * DCP control stage handler
 */
static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
{
        usbhs_dcp_control_transfer_done(pkt->pipe);

        *is_done = 1;

        return 0;
}

struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
        .prepare = usbhsf_ctrl_stage_end,
        .try_run = usbhsf_ctrl_stage_end,
};
/*
 * DMA fifo functions
 */
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
                                            struct usbhs_pkt *pkt)
{
        if (&usbhs_fifo_dma_push_handler == pkt->handler)
                return fifo->tx_chan;

        if (&usbhs_fifo_dma_pop_handler == pkt->handler)
                return fifo->rx_chan;

        return NULL;
}

static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
                                              struct usbhs_pkt *pkt)
{
        struct usbhs_fifo *fifo;
        int i;

        usbhs_for_each_dfifo(priv, fifo, i) {
                if (usbhsf_dma_chan_get(fifo, pkt) &&
                    !usbhsf_fifo_is_busy(fifo))
                        return fifo;
        }

        return NULL;
}

#define usbhsf_dma_start(p, f)	__usbhsf_dma_ctrl(p, f, DREQE)
#define usbhsf_dma_stop(p, f)	__usbhsf_dma_ctrl(p, f, 0)
static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
                              struct usbhs_fifo *fifo,
                              u16 dreqe)
{
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

        usbhs_bset(priv, fifo->sel, DREQE, dreqe);
}
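
/*
 * __usbhsf_dma_map_ctrl() hands DMA mapping/unmapping of the packet
 * buffer to the dma_map_ctrl() callback registered in the pipe info.
 */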
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
        struct usbhs_pipe *pipe = pkt->pipe;
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);

        return info->dma_map_ctrl(pkt, map);
}

static void usbhsf_dma_complete(void *arg);
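
/*
 * xfer_work() runs from the workqueue: it prepares and submits a
 * dmaengine slave transfer for pkt->trans bytes, then enables the pipe
 * and DREQE so the transfer request can drive the DMA.
 */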
static void xfer_work(struct work_struct *work)
{
        struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
        struct usbhs_pipe *pipe = pkt->pipe;
        struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct dma_async_tx_descriptor *desc;
        struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
        struct device *dev = usbhs_priv_to_dev(priv);
        enum dma_transfer_direction dir;

        dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

        desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
                                           pkt->trans, dir,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return;

        desc->callback = usbhsf_dma_complete;
        desc->callback_param = pipe;

        if (dmaengine_submit(desc) < 0) {
                dev_err(dev, "Failed to submit dma descriptor\n");
                return;
        }

        dev_dbg(dev, " %s %d (%d/ %d)\n",
                fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);

        usbhs_pipe_running(pipe, 1);
        usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
        usbhs_pipe_enable(pipe);
        usbhsf_dma_start(pipe, fifo);
        dma_async_issue_pending(chan);
}
/*
 * DMA push handler
 */
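/*
 * usbhsf_dma_prepare_push() uses DMA only when the pipe is not DCP, the
 * remaining length and the buffer address are 8-byte aligned, and the
 * length is at least pio_dma_border; everything else falls back to the
 * PIO push handler.
 */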
static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
{
        struct usbhs_pipe *pipe = pkt->pipe;
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct usbhs_fifo *fifo;
        int len = pkt->length - pkt->actual;
        int ret;

        if (usbhs_pipe_is_busy(pipe))
                return 0;

        /* use PIO if packet is less than pio_dma_border or pipe is DCP */
        if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
            usbhs_pipe_is_dcp(pipe))
                goto usbhsf_pio_prepare_push;

        if (len & 0x7) /* 8byte alignment */
                goto usbhsf_pio_prepare_push;

        if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
                goto usbhsf_pio_prepare_push;

        /* return at this time if the pipe is running */
        if (usbhs_pipe_is_running(pipe))
                return 0;

        /* get enable DMA fifo */
        fifo = usbhsf_get_dma_fifo(priv, pkt);
        if (!fifo)
                goto usbhsf_pio_prepare_push;

        if (usbhsf_dma_map(pkt) < 0)
                goto usbhsf_pio_prepare_push;

        ret = usbhsf_fifo_select(pipe, fifo, 0);
        if (ret < 0)
                goto usbhsf_pio_prepare_push_unmap;

        pkt->trans = len;

        INIT_WORK(&pkt->work, xfer_work);
        schedule_work(&pkt->work);

        return 0;

usbhsf_pio_prepare_push_unmap:
        usbhsf_dma_unmap(pkt);
usbhsf_pio_prepare_push:
        /*
         * change handler to PIO
         */
        pkt->handler = &usbhs_fifo_pio_push_handler;

        return pkt->handler->prepare(pkt, is_done);
}

static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
{
        struct usbhs_pipe *pipe = pkt->pipe;
        int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);

        pkt->actual += pkt->trans;

        if (pkt->actual < pkt->length)
                *is_done = 0;		/* there is remaining data */
        else if (is_short)
                *is_done = 1;		/* short packet */
        else
                *is_done = !pkt->zero;	/* send zero packet? */

        usbhs_pipe_running(pipe, !*is_done);

        usbhsf_dma_stop(pipe, pipe->fifo);
        usbhsf_dma_unmap(pkt);
        usbhsf_fifo_unselect(pipe, pipe->fifo);

        if (!*is_done) {
                /* change handler to PIO */
                pkt->handler = &usbhs_fifo_pio_push_handler;
                return pkt->handler->try_run(pkt, is_done);
        }

        return 0;
}

struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
        .prepare = usbhsf_dma_prepare_push,
        .dma_done = usbhsf_dma_push_done,
};
/*
 * DMA pop handler
 */
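/*
 * usbhsf_dma_try_pop() is called from the ready interrupt: if the
 * received length and the buffer are 8-byte aligned and large enough,
 * the BRDY interrupt is turned off and the transfer is handed to DMA;
 * otherwise it falls back to the PIO pop handler.
 */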
static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
        struct usbhs_pipe *pipe = pkt->pipe;
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct usbhs_fifo *fifo;
        int len, ret;

        if (usbhs_pipe_is_busy(pipe))
                return 0;

        if (usbhs_pipe_is_dcp(pipe))
                goto usbhsf_pio_prepare_pop;

        /* get enable DMA fifo */
        fifo = usbhsf_get_dma_fifo(priv, pkt);
        if (!fifo)
                goto usbhsf_pio_prepare_pop;

        if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
                goto usbhsf_pio_prepare_pop;

        ret = usbhsf_fifo_select(pipe, fifo, 0);
        if (ret < 0)
                goto usbhsf_pio_prepare_pop;

        /* use PIO if packet is less than pio_dma_border */
        len = usbhsf_fifo_rcv_len(priv, fifo);
        len = min(pkt->length - pkt->actual, len);
        if (len & 0x7) /* 8byte alignment */
                goto usbhsf_pio_prepare_pop_unselect;

        if (len < usbhs_get_dparam(priv, pio_dma_border))
                goto usbhsf_pio_prepare_pop_unselect;

        ret = usbhsf_fifo_barrier(priv, fifo);
        if (ret < 0)
                goto usbhsf_pio_prepare_pop_unselect;

        if (usbhsf_dma_map(pkt) < 0)
                goto usbhsf_pio_prepare_pop_unselect;

        /* DMA */

        /*
         * usbhs_fifo_dma_pop_handler :: prepare
         * enabled the irq that brought us here,
         * but it is no longer needed for DMA. disable it.
         */
        usbhsf_rx_irq_ctrl(pipe, 0);

        pkt->trans = len;

        INIT_WORK(&pkt->work, xfer_work);
        schedule_work(&pkt->work);

        return 0;

usbhsf_pio_prepare_pop_unselect:
        usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_pop:

        /*
         * change handler to PIO
         */
        pkt->handler = &usbhs_fifo_pio_pop_handler;

        return pkt->handler->try_run(pkt, is_done);
}

static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
{
        struct usbhs_pipe *pipe = pkt->pipe;
        int maxp = usbhs_pipe_get_maxpacket(pipe);

        usbhsf_dma_stop(pipe, pipe->fifo);
        usbhsf_dma_unmap(pkt);
        usbhsf_fifo_unselect(pipe, pipe->fifo);

        pkt->actual += pkt->trans;

        if ((pkt->actual == pkt->length) ||	/* receive all data */
            (pkt->trans < maxp)) {		/* short packet */
                *is_done = 1;
                usbhs_pipe_running(pipe, 0);
        } else {
                /* re-enable */
                usbhs_pipe_running(pipe, 0);
                usbhsf_prepare_pop(pkt, is_done);
        }

        return 0;
}

struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
        .prepare = usbhsf_prepare_pop,
        .try_run = usbhsf_dma_try_pop,
        .dma_done = usbhsf_dma_pop_done,
};
/*
 * DMA setting
 */
static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
{
        struct sh_dmae_slave *slave = param;

        /*
         * FIXME
         *
         * usbhs doesn't recognize id = 0 as valid DMA
         */
        if (0 == slave->shdma_slave.slave_id)
                return false;

        chan->private = slave;

        return true;
}

static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
{
        if (fifo->tx_chan)
                dma_release_channel(fifo->tx_chan);
        if (fifo->rx_chan)
                dma_release_channel(fifo->rx_chan);

        fifo->tx_chan = NULL;
        fifo->rx_chan = NULL;
}

static void usbhsf_dma_init_pdev(struct usbhs_fifo *fifo)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
                                            &fifo->tx_slave);

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
                                            &fifo->rx_slave);
}

static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo)
{
        fifo->tx_chan = dma_request_slave_channel_reason(dev, "tx");
        if (IS_ERR(fifo->tx_chan))
                fifo->tx_chan = NULL;
        fifo->rx_chan = dma_request_slave_channel_reason(dev, "rx");
        if (IS_ERR(fifo->rx_chan))
                fifo->rx_chan = NULL;
}

static void usbhsf_dma_init(struct usbhs_priv *priv,
                            struct usbhs_fifo *fifo)
{
        struct device *dev = usbhs_priv_to_dev(priv);

        if (dev->of_node)
                usbhsf_dma_init_dt(dev, fifo);
        else
                usbhsf_dma_init_pdev(fifo);

        if (fifo->tx_chan || fifo->rx_chan)
                dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
                        fifo->name,
                        fifo->tx_chan ? "[TX]" : " ",
                        fifo->rx_chan ? "[RX]" : " ");
}
/*
 * irq functions
 */
static int usbhsf_irq_empty(struct usbhs_priv *priv,
                            struct usbhs_irq_state *irq_state)
{
        struct usbhs_pipe *pipe;
        struct device *dev = usbhs_priv_to_dev(priv);
        int i, ret;

        if (!irq_state->bempsts) {
                dev_err(dev, "debug %s !!\n", __func__);
                return -EIO;
        }

        dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);

        /*
         * search interrupted "pipe"
         * not "uep".
         */
        usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
                if (!(irq_state->bempsts & (1 << i)))
                        continue;

                ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
                if (ret < 0)
                        dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
        }

        return 0;
}

static int usbhsf_irq_ready(struct usbhs_priv *priv,
                            struct usbhs_irq_state *irq_state)
{
        struct usbhs_pipe *pipe;
        struct device *dev = usbhs_priv_to_dev(priv);
        int i, ret;

        if (!irq_state->brdysts) {
                dev_err(dev, "debug %s !!\n", __func__);
                return -EIO;
        }

        dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);

        /*
         * search interrupted "pipe"
         * not "uep".
         */
        usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
                if (!(irq_state->brdysts & (1 << i)))
                        continue;

                ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
                if (ret < 0)
                        dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
        }

        return 0;
}

static void usbhsf_dma_complete(void *arg)
{
        struct usbhs_pipe *pipe = arg;
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct device *dev = usbhs_priv_to_dev(priv);
        int ret;

        ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
        if (ret < 0)
                dev_err(dev, "dma_complete run_error %d : %d\n",
                        usbhs_pipe_number(pipe), ret);
}

void usbhs_fifo_clear_dcp(struct usbhs_pipe *pipe)
{
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */

        /* clear DCP FIFO of transmission */
        if (usbhsf_fifo_select(pipe, fifo, 1) < 0)
                return;
        usbhsf_fifo_clear(pipe, fifo);
        usbhsf_fifo_unselect(pipe, fifo);

        /* clear DCP FIFO of reception */
        if (usbhsf_fifo_select(pipe, fifo, 0) < 0)
                return;
        usbhsf_fifo_clear(pipe, fifo);
        usbhsf_fifo_unselect(pipe, fifo);
}
/*
 * fifo init
 */
void usbhs_fifo_init(struct usbhs_priv *priv)
{
        struct usbhs_mod *mod = usbhs_mod_get_current(priv);
        struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
        struct usbhs_fifo *dfifo;
        int i;

        mod->irq_empty = usbhsf_irq_empty;
        mod->irq_ready = usbhsf_irq_ready;
        mod->irq_bempsts = 0;
        mod->irq_brdysts = 0;

        cfifo->pipe = NULL;
        usbhs_for_each_dfifo(priv, dfifo, i)
                dfifo->pipe = NULL;
}

void usbhs_fifo_quit(struct usbhs_priv *priv)
{
        struct usbhs_mod *mod = usbhs_mod_get_current(priv);

        mod->irq_empty = NULL;
        mod->irq_ready = NULL;
        mod->irq_bempsts = 0;
        mod->irq_brdysts = 0;
}

#define __USBHS_DFIFO_INIT(priv, fifo, channel, fifo_port) \
do { \
        fifo = usbhsf_get_dnfifo(priv, channel); \
        fifo->name = "D"#channel"FIFO"; \
        fifo->port = fifo_port; \
        fifo->sel = D##channel##FIFOSEL; \
        fifo->ctr = D##channel##FIFOCTR; \
        fifo->tx_slave.shdma_slave.slave_id = \
                        usbhs_get_dparam(priv, d##channel##_tx_id); \
        fifo->rx_slave.shdma_slave.slave_id = \
                        usbhs_get_dparam(priv, d##channel##_rx_id); \
        usbhsf_dma_init(priv, fifo); \
} while (0)

#define USBHS_DFIFO_INIT(priv, fifo, channel) \
        __USBHS_DFIFO_INIT(priv, fifo, channel, D##channel##FIFO)
#define USBHS_DFIFO_INIT_NO_PORT(priv, fifo, channel) \
        __USBHS_DFIFO_INIT(priv, fifo, channel, 0)

int usbhs_fifo_probe(struct usbhs_priv *priv)
{
        struct usbhs_fifo *fifo;

        /* CFIFO */
        fifo = usbhsf_get_cfifo(priv);
        fifo->name = "CFIFO";
        fifo->port = CFIFO;
        fifo->sel = CFIFOSEL;
        fifo->ctr = CFIFOCTR;

        /* DFIFO */
        USBHS_DFIFO_INIT(priv, fifo, 0);
        USBHS_DFIFO_INIT(priv, fifo, 1);
        USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 2);
        USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 3);

        return 0;
}

void usbhs_fifo_remove(struct usbhs_priv *priv)
{
        struct usbhs_fifo *fifo;
        int i;

        usbhs_for_each_dfifo(priv, fifo, i)
                usbhsf_dma_quit(priv, fifo);
}