  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
  4. *
  5. * epn.c - Generic endpoints management
  6. *
  7. * Copyright 2017 IBM Corporation
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2 of the License, or
  12. * (at your option) any later version.
  13. */
  14. #include <linux/kernel.h>
  15. #include <linux/module.h>
  16. #include <linux/platform_device.h>
  17. #include <linux/delay.h>
  18. #include <linux/ioport.h>
  19. #include <linux/slab.h>
  20. #include <linux/errno.h>
  21. #include <linux/list.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/proc_fs.h>
  24. #include <linux/prefetch.h>
  25. #include <linux/clk.h>
  26. #include <linux/usb/gadget.h>
  27. #include <linux/of.h>
  28. #include <linux/of_gpio.h>
  29. #include <linux/regmap.h>
  30. #include <linux/dma-mapping.h>
  31. #include "vhub.h"
  32. #define EXTRA_CHECKS
  33. #ifdef EXTRA_CHECKS
  34. #define CHECK(ep, expr, fmt...) \
  35. do { \
  36. if (!(expr)) EPDBG(ep, "CHECK:" fmt); \
  37. } while(0)
  38. #else
  39. #define CHECK(ep, expr, fmt...) do { } while(0)
  40. #endif
/*
 * Kick the next chunk of a request on an endpoint running in
 * single-stage (non-descriptor) DMA mode: at most one maxpacket-sized
 * chunk is transferred per call, the ACK interrupt handler re-kicks
 * until the request is complete.
 */
static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
{
	unsigned int act = req->req.actual;
	unsigned int len = req->req.length;
	unsigned int chunk;

	/* There should be no DMA ongoing */
	WARN_ON(req->active);

	/*
	 * Calculate next chunk size. A short final chunk -- or a full-size
	 * one when no zero-length packet was requested -- marks the request
	 * as finishing after this transfer (last_desc is used as a flag
	 * here, any value >= 0 means "last").
	 */
	chunk = len - act;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;

	EPVDBG(ep, "kick req %p act=%d/%d chunk=%d last=%d\n",
	       req, act, len, chunk, req->last_desc);

	/*
	 * If DMA unavailable (req.dma == 0 set at queue time), use the
	 * pre-allocated staging EP bounce buffer instead.
	 */
	if (!req->req.dma) {

		/* For IN transfers, copy data over first */
		if (ep->epn.is_in) {
			memcpy(ep->buf, req->req.buf + act, chunk);
			vhub_dma_workaround(ep->buf);
		}
		writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
	} else {
		if (ep->epn.is_in)
			vhub_dma_workaround(req->req.buf);
		writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
	}

	/*
	 * Start DMA. The size is programmed first, then written again with
	 * the kick bit set -- presumably the HW latches the size before the
	 * kick; NOTE(review): ordering requirement inferred from the code,
	 * confirm against the vHub datasheet.
	 */
	req->active = true;
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk) | VHUB_EP_DMA_SINGLE_KICK,
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
}
/*
 * ACK interrupt handler for an endpoint in single-stage (non-descriptor)
 * DMA mode: completes the chunk just transferred and kicks the next one,
 * completing the request when the last chunk is done.
 *
 * NOTE(review): appears to run under ep->vhub->lock (the queue path kicks
 * with it held and ast_vhub_done drops/re-takes it) -- confirm for the
 * IRQ dispatch path.
 */
static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len;
	u32 stat;

	/* Read EP status */
	stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x is_in=%d, req=%p (active=%d)\n",
	       stat, ep->epn.is_in, req, req ? req->active : 0);

	/* In absence of a request, bail out, must have been dequeued */
	if (!req)
		return;

	/*
	 * Request not active, move on to processing queue, active request
	 * was probably dequeued
	 */
	if (!req->active)
		goto next_chunk;

	/* Check if HW has moved on */
	if (VHUB_EP_DMA_RPTR(stat) != 0) {
		EPDBG(ep, "DMA read pointer not 0 !\n");
		return;
	}

	/* No current DMA ongoing */
	req->active = false;

	/* Grab length out of HW */
	len = VHUB_EP_DMA_TX_SIZE(stat);

	/* If not using DMA, copy data out of the bounce buffer if needed */
	if (!req->req.dma && !ep->epn.is_in && len)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);

	/* Adjust size */
	req->req.actual += len;

	/* Check for short packet: terminates the request */
	if (len < ep->ep.maxpacket)
		req->last_desc = 1;

	/* That's it ? complete the request and pick a new one */
	if (req->last_desc >= 0) {
		ast_vhub_done(ep, req, 0);
		req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req,
					       queue);

		/*
		 * Due to lock dropping inside "done" the next request could
		 * already be active, so check for that and bail if needed.
		 */
		if (!req || req->active)
			return;
	}

next_chunk:
	ast_vhub_epn_kick(ep, req);
}
  128. static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
  129. {
  130. /*
  131. * d_next == d_last means descriptor list empty to HW,
  132. * thus we can only have AST_VHUB_DESCS_COUNT-1 descriptors
  133. * in the list
  134. */
  135. return (ep->epn.d_last + AST_VHUB_DESCS_COUNT - ep->epn.d_next - 1) &
  136. (AST_VHUB_DESCS_COUNT - 1);
  137. }
/*
 * Populate and kick descriptors for a request on an endpoint in
 * descriptor DMA mode (IN only). Builds as many descriptors as the
 * ring has room for (up to chunk_max bytes each) and advances the
 * CPU write pointer to hand them to the HW.
 */
static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
				   struct ast_vhub_req *req)
{
	struct ast_vhub_desc *desc = NULL;
	unsigned int act = req->act_count;	/* bytes already described */
	unsigned int len = req->req.length;
	unsigned int chunk;

	/* Mark request active if not already */
	req->active = true;

	/* If the request was already completely written, do nothing */
	if (req->last_desc >= 0)
		return;

	EPVDBG(ep, "kick act=%d/%d chunk_max=%d free_descs=%d\n",
	       act, len, ep->epn.chunk_max, ast_vhub_count_free_descs(ep));

	/* While we can create descriptors */
	while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
		unsigned int d_num;

		/* Grab next free descriptor */
		d_num = ep->epn.d_next;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_next = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

		/* Calculate next chunk size */
		chunk = len - act;
		if (chunk <= ep->epn.chunk_max) {
			/*
			 * Is this the last packet ? Because of having up to 8
			 * packets in a descriptor we can't just compare "chunk"
			 * with ep.maxpacket. We have to see if it's a multiple
			 * of it to know if we have to send a zero packet.
			 * Sadly that involves a modulo which is a bit expensive
			 * but probably still better than not doing it.
			 */
			if (!chunk || !req->req.zero || (chunk % ep->ep.maxpacket) != 0)
				req->last_desc = d_num;
		} else {
			chunk = ep->epn.chunk_max;
		}

		EPVDBG(ep, " chunk: act=%d/%d chunk=%d last=%d desc=%d free=%d\n",
		       act, len, chunk, req->last_desc, d_num,
		       ast_vhub_count_free_descs(ep));

		/* Populate descriptor: w0 = DMA address of this chunk */
		desc->w0 = cpu_to_le32(req->req.dma + act);

		/* Interrupt if end of request or no more descriptors */

		/*
		 * TODO: Be smarter about it, if we don't have enough
		 * descriptors request an interrupt before queue empty
		 * or so in order to be able to populate more before
		 * the HW runs out. This isn't a problem at the moment
		 * as we use 256 descriptors and only put at most one
		 * request in the ring.
		 */
		desc->w1 = cpu_to_le32(VHUB_DSC1_IN_SET_LEN(chunk));
		if (req->last_desc >= 0 || !ast_vhub_count_free_descs(ep))
			desc->w1 |= cpu_to_le32(VHUB_DSC1_IN_INTERRUPT);

		/* Account packet */
		req->act_count = act = act + chunk;
	}

	/* Flush workaround only needed for the last descriptor written */
	if (likely(desc))
		vhub_dma_workaround(desc);

	/* Tell HW about new descriptors */
	writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

	EPVDBG(ep, "HW kicked, d_next=%d dstat=%08x\n",
	       ep->epn.d_next, readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS));
}
/*
 * ACK interrupt handler for an endpoint in descriptor DMA mode:
 * retires every descriptor the HW has consumed since last time,
 * completes the request on its last descriptor, and refills the
 * ring if more work is queued.
 */
static void ast_vhub_epn_handle_ack_desc(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len, d_last;
	u32 stat, stat1;

	/* Read EP status, workaround HW race: re-read until stable */
	do {
		stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
		stat1 = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	} while(stat != stat1);

	/* Extract RPTR: how far the HW has consumed the ring */
	d_last = VHUB_EP_DMA_RPTR(stat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x is_in=%d ep->d_last=%d..%d\n",
	       stat, ep->epn.is_in, ep->epn.d_last, d_last);

	/* Check all completed descriptors */
	while (ep->epn.d_last != d_last) {
		struct ast_vhub_desc *desc;
		unsigned int d_num;
		bool is_last_desc;

		/* Grab next completed descriptor */
		d_num = ep->epn.d_last;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_last = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

		/* Grab len out of descriptor */
		len = VHUB_DSC1_IN_LEN(le32_to_cpu(desc->w1));

		EPVDBG(ep, " desc %d len=%d req=%p (act=%d)\n",
		       d_num, len, req, req ? req->active : 0);

		/* If no active request pending, move on */
		if (!req || !req->active)
			continue;

		/* Adjust size */
		req->req.actual += len;

		/* Is that the last chunk ? */
		is_last_desc = req->last_desc == d_num;
		CHECK(ep, is_last_desc == (len < ep->ep.maxpacket ||
					   (req->req.actual >= req->req.length &&
					    !req->req.zero)),
		      "Last packet discrepancy: last_desc=%d len=%d r.act=%d "
		      "r.len=%d r.zero=%d mp=%d\n",
		      is_last_desc, len, req->req.actual, req->req.length,
		      req->req.zero, ep->ep.maxpacket);

		if (is_last_desc) {
			/*
			 * Because we can only have one request at a time
			 * in our descriptor list in this implementation,
			 * d_last and ep->d_last should now be equal
			 */
			CHECK(ep, d_last == ep->epn.d_last,
			      "DMA read ptr mismatch %d vs %d\n",
			      d_last, ep->epn.d_last);

			/* Note: done will drop and re-acquire the lock */
			ast_vhub_done(ep, req, 0);
			req = list_first_entry_or_null(&ep->queue,
						       struct ast_vhub_req,
						       queue);
			break;
		}
	}

	/* More work ? Refill the descriptor ring for the next request */
	if (req)
		ast_vhub_epn_kick_desc(ep, req);
}
  267. void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep)
  268. {
  269. if (ep->epn.desc_mode)
  270. ast_vhub_epn_handle_ack_desc(ep);
  271. else
  272. ast_vhub_epn_handle_ack(ep);
  273. }
/*
 * usb_ep_ops.queue for generic endpoints.
 *
 * Validates the request, decides whether it can be DMA-mapped directly
 * or must go through the EP bounce buffer (signalled by u_req->dma == 0),
 * then appends it to the endpoint queue, kicking the HW if the queue
 * was empty.
 *
 * Returns 0 on success, -EINVAL on a malformed request, -ESHUTDOWN if
 * the endpoint or device isn't usable, or the mapping error code.
 */
static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;
	bool empty;
	int rc;

	/* Paranoid checks */
	if (!u_req || !u_req->complete || !u_req->buf) {
		dev_warn(&vhub->pdev->dev, "Bogus EPn request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(&vhub->pdev->dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}

	/* Endpoint enabled ? */
	if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
	    !ep->dev->enabled || ep->dev->suspended) {
		EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
		return -ESHUTDOWN;
	}

	/* Map request for DMA if possible. For now, the rule for DMA is
	 * that:
	 *
	 *  * For single stage mode (no descriptors):
	 *
	 *   - The buffer is aligned to a 8 bytes boundary (HW requirement)
	 *   - For a OUT endpoint, the request size is a multiple of the EP
	 *     packet size (otherwise the controller will DMA past the end
	 *     of the buffer if the host is sending a too long packet).
	 *
	 *  * For descriptor mode (tx only for now), always.
	 *
	 * We could relax the latter by making the decision to use the bounce
	 * buffer based on the size of a given *segment* of the request rather
	 * than the whole request.
	 */
	if (ep->epn.desc_mode ||
	    ((((unsigned long)u_req->buf & 7) == 0) &&
	     (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
		rc = usb_gadget_map_request(&ep->dev->gadget, u_req,
					    ep->epn.is_in);
		if (rc) {
			dev_warn(&vhub->pdev->dev,
				 "Request mapping failure %d\n", rc);
			return rc;
		}
	} else
		/* dma == 0 means "use the bounce buffer" in the kick paths */
		u_req->dma = 0;

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, " l=%d dma=0x%x zero=%d noshort=%d noirq=%d is_in=%d\n",
	       u_req->length, (u32)u_req->dma, u_req->zero,
	       u_req->short_not_ok, u_req->no_interrupt,
	       ep->epn.is_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->act_count = 0;
	req->active = false;
	req->last_desc = -1;	/* -1 == "last descriptor not built yet" */

	spin_lock_irqsave(&vhub->lock, flags);
	empty = list_empty(&ep->queue);

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);
	if (empty) {
		if (ep->epn.desc_mode)
			ast_vhub_epn_kick_desc(ep, req);
		else
			ast_vhub_epn_kick(ep, req);
	}
	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}
/*
 * Stop any DMA activity on an endpoint, polling until the engine is
 * idle, and optionally restart it (used when dequeuing an active
 * request while keeping the endpoint usable).
 *
 * Callers hold the vhub lock (dequeue/disable paths take it first).
 */
static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
				     bool restart_ep)
{
	u32 state, reg, loops;

	/* Stop DMA activity */
	writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

	/* Wait for it to complete (bounded busy-wait, ~1ms worst case) */
	for (loops = 0; loops < 1000; loops++) {
		state = readl(ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		state = VHUB_EP_DMA_PROC_STATUS(state);
		if (state == EP_DMA_PROC_RX_IDLE ||
		    state == EP_DMA_PROC_TX_IDLE)
			break;
		udelay(1);
	}
	if (loops >= 1000)
		dev_warn(&ep->vhub->pdev->dev, "Timeout waiting for DMA\n");

	/* If we don't have to restart the endpoint, that's it */
	if (!restart_ep)
		return;

	/* Restart the endpoint */
	if (ep->epn.desc_mode) {
		/*
		 * Take out descriptors by resetting the DMA read
		 * pointer to be equal to the CPU write pointer.
		 *
		 * Note: If we ever support creating descriptors for
		 * requests that aren't the head of the queue, we
		 * may have to do something more complex here,
		 * especially if the request being taken out is
		 * not the current head descriptors.
		 */
		reg = VHUB_EP_DMA_SET_RPTR(ep->epn.d_next) |
			VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next);
		writel(reg, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Then turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Single mode: just turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	}
}
  394. static int ast_vhub_epn_dequeue(struct usb_ep* u_ep, struct usb_request *u_req)
  395. {
  396. struct ast_vhub_ep *ep = to_ast_ep(u_ep);
  397. struct ast_vhub *vhub = ep->vhub;
  398. struct ast_vhub_req *req;
  399. unsigned long flags;
  400. int rc = -EINVAL;
  401. spin_lock_irqsave(&vhub->lock, flags);
  402. /* Make sure it's actually queued on this endpoint */
  403. list_for_each_entry (req, &ep->queue, queue) {
  404. if (&req->req == u_req)
  405. break;
  406. }
  407. if (&req->req == u_req) {
  408. EPVDBG(ep, "dequeue req @%p active=%d\n",
  409. req, req->active);
  410. if (req->active)
  411. ast_vhub_stop_active_req(ep, true);
  412. ast_vhub_done(ep, req, -ECONNRESET);
  413. rc = 0;
  414. }
  415. spin_unlock_irqrestore(&vhub->lock, flags);
  416. return rc;
  417. }
  418. void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep)
  419. {
  420. u32 reg;
  421. if (WARN_ON(ep->d_idx == 0))
  422. return;
  423. reg = readl(ep->epn.regs + AST_VHUB_EP_CONFIG);
  424. if (ep->epn.stalled || ep->epn.wedged)
  425. reg |= VHUB_EP_CFG_STALL_CTRL;
  426. else
  427. reg &= ~VHUB_EP_CFG_STALL_CTRL;
  428. writel(reg, ep->epn.regs + AST_VHUB_EP_CONFIG);
  429. if (!ep->epn.stalled && !ep->epn.wedged)
  430. writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
  431. ep->vhub->regs + AST_VHUB_EP_TOGGLE);
  432. }
  433. static int ast_vhub_set_halt_and_wedge(struct usb_ep* u_ep, bool halt,
  434. bool wedge)
  435. {
  436. struct ast_vhub_ep *ep = to_ast_ep(u_ep);
  437. struct ast_vhub *vhub = ep->vhub;
  438. unsigned long flags;
  439. EPDBG(ep, "Set halt (%d) & wedge (%d)\n", halt, wedge);
  440. if (!u_ep || !u_ep->desc)
  441. return -EINVAL;
  442. if (ep->d_idx == 0)
  443. return 0;
  444. if (ep->epn.is_iso)
  445. return -EOPNOTSUPP;
  446. spin_lock_irqsave(&vhub->lock, flags);
  447. /* Fail with still-busy IN endpoints */
  448. if (halt && ep->epn.is_in && !list_empty(&ep->queue)) {
  449. spin_unlock_irqrestore(&vhub->lock, flags);
  450. return -EAGAIN;
  451. }
  452. ep->epn.stalled = halt;
  453. ep->epn.wedged = wedge;
  454. ast_vhub_update_epn_stall(ep);
  455. spin_unlock_irqrestore(&vhub->lock, flags);
  456. return 0;
  457. }
  458. static int ast_vhub_epn_set_halt(struct usb_ep *u_ep, int value)
  459. {
  460. return ast_vhub_set_halt_and_wedge(u_ep, value != 0, false);
  461. }
  462. static int ast_vhub_epn_set_wedge(struct usb_ep *u_ep)
  463. {
  464. return ast_vhub_set_halt_and_wedge(u_ep, true, true);
  465. }
/*
 * usb_ep_ops.disable for generic endpoints: stop DMA, turn off the HW
 * endpoint and its ACK interrupt, then fail all pending requests with
 * -ESHUTDOWN. Always returns 0.
 */
static int ast_vhub_epn_disable(struct usb_ep* u_ep)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;
	u32 imask, ep_ier;

	EPDBG(ep, "Disabling !\n");

	spin_lock_irqsave(&vhub->lock, flags);

	ep->epn.enabled = false;

	/* Stop active DMA if any */
	ast_vhub_stop_active_req(ep, false);

	/* Disable endpoint */
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);

	/* Disable ACK interrupt, then clear any latched status */
	imask = VHUB_EP_IRQ(ep->epn.g_idx);
	ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
	ep_ier &= ~imask;
	writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);
	writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);

	/* Nuke all pending requests */
	ast_vhub_nuke(ep, -ESHUTDOWN);

	/* No more descriptor associated with request */
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}
/*
 * usb_ep_ops.enable for generic endpoints.
 *
 * Validates the USB endpoint descriptor against this HW endpoint,
 * chooses single-stage vs descriptor DMA mode (descriptor mode is IN
 * only, and only when a descriptor ring was allocated), programs the
 * EP config and DMA registers, resets the data toggle and enables the
 * ACK interrupt.
 *
 * Returns 0 on success, -EINVAL on descriptor mismatch, -EBUSY if
 * already enabled, -ESHUTDOWN if the device has no bound driver.
 */
static int ast_vhub_epn_enable(struct usb_ep* u_ep,
			       const struct usb_endpoint_descriptor *desc)
{
	static const char *ep_type_string[] __maybe_unused = { "ctrl",
							       "isoc",
							       "bulk",
							       "intr" };
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub_dev *dev;
	struct ast_vhub *vhub;
	u16 maxpacket, type;
	unsigned long flags;
	u32 ep_conf, ep_ier, imask;

	/* Check arguments */
	if (!u_ep || !desc)
		return -EINVAL;

	maxpacket = usb_endpoint_maxp(desc);
	if (!ep->d_idx || !ep->dev ||
	    desc->bDescriptorType != USB_DT_ENDPOINT ||
	    maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
		EPDBG(ep, "Invalid EP enable,d_idx=%d,dev=%p,type=%d,mp=%d/%d\n",
		      ep->d_idx, ep->dev, desc->bDescriptorType,
		      maxpacket, ep->ep.maxpacket);
		return -EINVAL;
	}
	if (ep->d_idx != usb_endpoint_num(desc)) {
		EPDBG(ep, "EP number mismatch !\n");
		return -EINVAL;
	}

	if (ep->epn.enabled) {
		EPDBG(ep, "Already enabled\n");
		return -EBUSY;
	}
	dev = ep->dev;
	vhub = ep->vhub;

	/* Check device state */
	if (!dev->driver) {
		EPDBG(ep, "Bogus device state: driver=%p speed=%d\n",
		      dev->driver, dev->gadget.speed);
		return -ESHUTDOWN;
	}

	/* Grab some info from the descriptor */
	ep->epn.is_in = usb_endpoint_dir_in(desc);
	ep->ep.maxpacket = maxpacket;
	type = usb_endpoint_type(desc);
	ep->epn.d_next = ep->epn.d_last = 0;
	ep->epn.is_iso = false;
	ep->epn.stalled = false;
	ep->epn.wedged = false;

	EPDBG(ep, "Enabling [%s] %s num %d maxpacket=%d\n",
	      ep->epn.is_in ? "in" : "out", ep_type_string[type],
	      usb_endpoint_num(desc), maxpacket);

	/* Can we use DMA descriptor mode ? */
	ep->epn.desc_mode = ep->epn.descs && ep->epn.is_in;
	if (ep->epn.desc_mode)
		memset(ep->epn.descs, 0, 8 * AST_VHUB_DESCS_COUNT);

	/*
	 * Large send function can send up to 8 packets from
	 * one descriptor with a limit of 4095 bytes.
	 * (trim the 8-packet chunk back down until it fits in 4095)
	 */
	ep->epn.chunk_max = ep->ep.maxpacket;
	if (ep->epn.is_in) {
		ep->epn.chunk_max <<= 3;
		while (ep->epn.chunk_max > 4095)
			ep->epn.chunk_max -= ep->ep.maxpacket;
	}

	switch(type) {
	case USB_ENDPOINT_XFER_CONTROL:
		EPDBG(ep, "Only one control endpoint\n");
		return -EINVAL;
	case USB_ENDPOINT_XFER_INT:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_INT);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_BULK);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_ISO);
		ep->epn.is_iso = true;
		break;
	default:
		return -EINVAL;
	}

	/* Encode the rest of the EP config register */
	if (maxpacket < 1024)
		ep_conf |= VHUB_EP_CFG_SET_MAX_PKT(maxpacket);
	if (!ep->epn.is_in)
		ep_conf |= VHUB_EP_CFG_DIR_OUT;
	ep_conf |= VHUB_EP_CFG_SET_EP_NUM(usb_endpoint_num(desc));
	ep_conf |= VHUB_EP_CFG_ENABLE;
	ep_conf |= VHUB_EP_CFG_SET_DEV(dev->index + 1);
	EPVDBG(ep, "config=%08x\n", ep_conf);

	spin_lock_irqsave(&vhub->lock, flags);

	/* Disable HW and reset DMA */
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);
	writel(VHUB_EP_DMA_CTRL_RESET,
	       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

	/* Configure and enable */
	writel(ep_conf, ep->epn.regs + AST_VHUB_EP_CONFIG);

	if (ep->epn.desc_mode) {
		/* Clear DMA status, including the DMA read ptr */
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Set descriptor base */
		writel(ep->epn.descs_dma,
		       ep->epn.regs + AST_VHUB_EP_DESC_BASE);

		/* Set base DMA config value */
		ep->epn.dma_conf = VHUB_EP_DMA_DESC_MODE;
		if (ep->epn.is_in)
			ep->epn.dma_conf |= VHUB_EP_DMA_IN_LONG_MODE;

		/* First reset and disable all operations */
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

		/* Enable descriptor mode */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Set base DMA config value */
		ep->epn.dma_conf = VHUB_EP_DMA_SINGLE_STAGE;

		/* Reset and switch to single stage mode */
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	}

	/* Cleanup data toggle just in case */
	writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
	       vhub->regs + AST_VHUB_EP_TOGGLE);

	/* Cleanup and enable ACK interrupt */
	imask = VHUB_EP_IRQ(ep->epn.g_idx);
	writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);
	ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
	ep_ier |= imask;
	writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);

	/* Woot, we are online ! */
	ep->epn.enabled = true;

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}
  631. static void ast_vhub_epn_dispose(struct usb_ep *u_ep)
  632. {
  633. struct ast_vhub_ep *ep = to_ast_ep(u_ep);
  634. if (WARN_ON(!ep->dev || !ep->d_idx))
  635. return;
  636. EPDBG(ep, "Releasing endpoint\n");
  637. /* Take it out of the EP list */
  638. list_del_init(&ep->ep.ep_list);
  639. /* Mark the address free in the device */
  640. ep->dev->epns[ep->d_idx - 1] = NULL;
  641. /* Free name & DMA buffers */
  642. kfree(ep->ep.name);
  643. ep->ep.name = NULL;
  644. dma_free_coherent(&ep->vhub->pdev->dev,
  645. AST_VHUB_EPn_MAX_PACKET +
  646. 8 * AST_VHUB_DESCS_COUNT,
  647. ep->buf, ep->buf_dma);
  648. ep->buf = NULL;
  649. ep->epn.descs = NULL;
  650. /* Mark free */
  651. ep->dev = NULL;
  652. }
  653. static const struct usb_ep_ops ast_vhub_epn_ops = {
  654. .enable = ast_vhub_epn_enable,
  655. .disable = ast_vhub_epn_disable,
  656. .dispose = ast_vhub_epn_dispose,
  657. .queue = ast_vhub_epn_queue,
  658. .dequeue = ast_vhub_epn_dequeue,
  659. .set_halt = ast_vhub_epn_set_halt,
  660. .set_wedge = ast_vhub_epn_set_wedge,
  661. .alloc_request = ast_vhub_alloc_request,
  662. .free_request = ast_vhub_free_request,
  663. };
  664. struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr)
  665. {
  666. struct ast_vhub *vhub = d->vhub;
  667. struct ast_vhub_ep *ep;
  668. unsigned long flags;
  669. int i;
  670. /* Find a free one (no device) */
  671. spin_lock_irqsave(&vhub->lock, flags);
  672. for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++)
  673. if (vhub->epns[i].dev == NULL)
  674. break;
  675. if (i >= AST_VHUB_NUM_GEN_EPs) {
  676. spin_unlock_irqrestore(&vhub->lock, flags);
  677. return NULL;
  678. }
  679. /* Set it up */
  680. ep = &vhub->epns[i];
  681. ep->dev = d;
  682. spin_unlock_irqrestore(&vhub->lock, flags);
  683. DDBG(d, "Allocating gen EP %d for addr %d\n", i, addr);
  684. INIT_LIST_HEAD(&ep->queue);
  685. ep->d_idx = addr;
  686. ep->vhub = vhub;
  687. ep->ep.ops = &ast_vhub_epn_ops;
  688. ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", addr);
  689. d->epns[addr-1] = ep;
  690. ep->epn.g_idx = i;
  691. ep->epn.regs = vhub->regs + 0x200 + (i * 0x10);
  692. ep->buf = dma_alloc_coherent(&vhub->pdev->dev,
  693. AST_VHUB_EPn_MAX_PACKET +
  694. 8 * AST_VHUB_DESCS_COUNT,
  695. &ep->buf_dma, GFP_KERNEL);
  696. if (!ep->buf) {
  697. kfree(ep->ep.name);
  698. ep->ep.name = NULL;
  699. return NULL;
  700. }
  701. ep->epn.descs = ep->buf + AST_VHUB_EPn_MAX_PACKET;
  702. ep->epn.descs_dma = ep->buf_dma + AST_VHUB_EPn_MAX_PACKET;
  703. usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EPn_MAX_PACKET);
  704. list_add_tail(&ep->ep.ep_list, &d->gadget.ep_list);
  705. ep->ep.caps.type_iso = true;
  706. ep->ep.caps.type_bulk = true;
  707. ep->ep.caps.type_int = true;
  708. ep->ep.caps.dir_in = true;
  709. ep->ep.caps.dir_out = true;
  710. return ep;
  711. }