gr_udc.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
 *
 * 2013 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRUSBDC USB Device Controller cores available in the
 * GRLIB VHDL IP core library.
 *
 * Full documentation of the GRUSBDC core can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * Contributors:
 * - Andreas Larsson <andreas@gaisler.com>
 * - Marko Isomaki
 */

/*
 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints each
 * individually configurable to any of the four USB transfer types. This driver
 * only supports cores in DMA mode.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <asm/byteorder.h>

#include "gr_udc.h"

#define DRIVER_NAME	"gr_udc"
#define DRIVER_DESC	"Aeroflex Gaisler GRUSBDC USB Peripheral Controller"

static const char driver_name[] = DRIVER_NAME;
static const char driver_desc[] = DRIVER_DESC;

#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))

/* USB speed and corresponding string calculated from status register value */
#define GR_SPEED(status) \
        ((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))

/* Size of hardware buffer calculated from epctrl register value */
#define GR_BUFFER_SIZE(epctrl)                                        \
        ((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
         GR_EPCTRL_BUFSZ_SCALER)
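
/*
 * Illustrative only (the scaler value below is assumed, not taken from the
 * GRIP manual): if GR_EPCTRL_BUFSZ_SCALER were 8, a BUFSZ field value of 128
 * read from epctrl would give GR_BUFFER_SIZE(epctrl) == 128 * 8 == 1024 bytes
 * of per-endpoint hardware buffer.
 */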

/* ---------------------------------------------------------------------- */
/* Debug printout functionality */

static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};

static const char *gr_ep0state_string(enum gr_ep0state state)
{
        static const char *const names[] = {
                [GR_EP0_DISCONNECT] = "disconnect",
                [GR_EP0_SETUP] = "setup",
                [GR_EP0_IDATA] = "idata",
                [GR_EP0_ODATA] = "odata",
                [GR_EP0_ISTATUS] = "istatus",
                [GR_EP0_OSTATUS] = "ostatus",
                [GR_EP0_STALL] = "stall",
                [GR_EP0_SUSPEND] = "suspend",
        };

        if (state < 0 || state >= ARRAY_SIZE(names))
                return "UNKNOWN";
        return names[state];
}

#ifdef VERBOSE_DEBUG

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
                                struct gr_request *req)
{
        int buflen = ep->is_in ? req->req.length : req->req.actual;
        int rowlen = 32;
        int plen = min(rowlen, buflen);

        dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
                (buflen > plen ? " (truncated)" : ""));
        print_hex_dump_debug("   ", DUMP_PREFIX_NONE,
                             rowlen, 4, req->req.buf, plen, false);
}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
                               u16 value, u16 index, u16 length)
{
        dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
                 type, request, value, index, length);
}
#else /* !VERBOSE_DEBUG */

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
                                struct gr_request *req) {}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
                               u16 value, u16 index, u16 length) {}

#endif /* VERBOSE_DEBUG */
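
/*
 * For instance, a standard GET_DESCRIPTOR(DEVICE) setup packet (bRequestType
 * 0x80, bRequest 0x06, wValue 0x0100, wIndex 0, wLength 18) would be logged
 * by gr_dbgprint_devreq() above as "REQ: 80.06 v0100 i0000 l0012".
 */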

/* ---------------------------------------------------------------------- */
/* Debugfs functionality */

#ifdef CONFIG_USB_GADGET_DEBUG_FS

static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
        u32 epctrl = gr_read32(&ep->regs->epctrl);
        u32 epstat = gr_read32(&ep->regs->epstat);
        int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
        struct gr_request *req;

        seq_printf(seq, "%s:\n", ep->ep.name);
        seq_printf(seq, "  mode = %s\n", gr_modestring[mode]);
        seq_printf(seq, "  halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
        seq_printf(seq, "  disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
        seq_printf(seq, "  valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
        seq_printf(seq, "  dma_start = %d\n", ep->dma_start);
        seq_printf(seq, "  stopped = %d\n", ep->stopped);
        seq_printf(seq, "  wedged = %d\n", ep->wedged);
        seq_printf(seq, "  callback = %d\n", ep->callback);
        seq_printf(seq, "  maxpacket = %d\n", ep->ep.maxpacket);
        seq_printf(seq, "  maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
        seq_printf(seq, "  bytes_per_buffer = %d\n", ep->bytes_per_buffer);
        if (mode == 1 || mode == 3)
                seq_printf(seq, "  nt = %d\n",
                           (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);
        seq_printf(seq, "  Buffer 0: %s %s%d\n",
                   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
                   epstat & GR_EPSTAT_BS ? " " : "selected ",
                   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
        seq_printf(seq, "  Buffer 1: %s %s%d\n",
                   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
                   epstat & GR_EPSTAT_BS ? "selected " : " ",
                   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);
        if (list_empty(&ep->queue)) {
                seq_puts(seq, "  Queue: empty\n\n");
                return;
        }

        seq_puts(seq, "  Queue:\n");
        list_for_each_entry(req, &ep->queue, queue) {
                struct gr_dma_desc *desc;
                struct gr_dma_desc *next;

                seq_printf(seq, "    0x%p: 0x%p %d %d\n", req,
                           &req->req.buf, req->req.actual, req->req.length);

                next = req->first_desc;
                do {
                        desc = next;
                        next = desc->next_desc;
                        seq_printf(seq, "      %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
                                   desc == req->curr_desc ? 'c' : ' ',
                                   desc, desc->paddr, desc->ctrl, desc->data);
                } while (desc != req->last_desc);
        }
        seq_puts(seq, "\n");
}

static int gr_dfs_show(struct seq_file *seq, void *v)
{
        struct gr_udc *dev = seq->private;
        u32 control = gr_read32(&dev->regs->control);
        u32 status = gr_read32(&dev->regs->status);
        struct gr_ep *ep;

        seq_printf(seq, "usb state = %s\n",
                   usb_state_string(dev->gadget.state));
        seq_printf(seq, "address = %d\n",
                   (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
        seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
        seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
        seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
        seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
        seq_printf(seq, "test_mode = %d\n", dev->test_mode);
        seq_puts(seq, "\n");

        list_for_each_entry(ep, &dev->ep_list, ep_list)
                gr_seq_ep_show(seq, ep);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(gr_dfs);

static void gr_dfs_create(struct gr_udc *dev)
{
        const char *name = "gr_udc_state";

        dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
        dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root, dev,
                                             &gr_dfs_fops);
}

static void gr_dfs_delete(struct gr_udc *dev)
{
        /* Handles NULL and ERR pointers internally */
        debugfs_remove(dev->dfs_state);
        debugfs_remove(dev->dfs_root);
}

#else /* !CONFIG_USB_GADGET_DEBUG_FS */

static void gr_dfs_create(struct gr_udc *dev) {}
static void gr_dfs_delete(struct gr_udc *dev) {}

#endif /* CONFIG_USB_GADGET_DEBUG_FS */

/* ---------------------------------------------------------------------- */
/* DMA and request handling */

/* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
{
        dma_addr_t paddr;
        struct gr_dma_desc *dma_desc;

        dma_desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr);
        if (!dma_desc) {
                dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
                return NULL;
        }

        dma_desc->paddr = paddr;

        return dma_desc;
}

static inline void gr_free_dma_desc(struct gr_udc *dev,
                                    struct gr_dma_desc *desc)
{
        dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}

/* Frees the chain of struct gr_dma_desc for the given request */
static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
{
        struct gr_dma_desc *desc;
        struct gr_dma_desc *next;

        next = req->first_desc;
        if (!next)
                return;

        do {
                desc = next;
                next = desc->next_desc;
                gr_free_dma_desc(dev, desc);
        } while (desc != req->last_desc);

        req->first_desc = NULL;
        req->curr_desc = NULL;
        req->last_desc = NULL;
}

static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);

/*
 * Frees allocated resources and calls the appropriate completion function/setup
 * packet handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
                              int status)
        __releases(&dev->lock)
        __acquires(&dev->lock)
{
        struct gr_udc *dev;

        list_del_init(&req->queue);

        if (likely(req->req.status == -EINPROGRESS))
                req->req.status = status;
        else
                status = req->req.status;

        dev = ep->dev;
        usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
        gr_free_dma_desc_chain(dev, req);

        if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
                req->req.actual = req->req.length;
        } else if (req->oddlen && req->req.actual > req->evenlen) {
                /*
                 * Copy to user buffer in this case where length was not evenly
                 * divisible by ep->ep.maxpacket and the last descriptor was
                 * actually used.
                 */
                char *buftail = ((char *)req->req.buf + req->evenlen);

                memcpy(buftail, ep->tailbuf, req->oddlen);

                if (req->req.actual > req->req.length) {
                        /* We got more data than was requested */
                        dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
                                ep->ep.name);
                        gr_dbgprint_request("OVFL", ep, req);
                        req->req.status = -EOVERFLOW;
                }
        }

        if (!status) {
                if (ep->is_in)
                        gr_dbgprint_request("SENT", ep, req);
                else
                        gr_dbgprint_request("RECV", ep, req);
        }

        /* Prevent changes to ep->queue during callback */
        ep->callback = 1;
        if (req == dev->ep0reqo && !status) {
                if (req->setup)
                        gr_ep0_setup(dev, req);
                else
                        dev_err(dev->dev,
                                "Unexpected non setup packet on ep0in\n");
        } else if (req->req.complete) {
                spin_unlock(&dev->lock);

                usb_gadget_giveback_request(&ep->ep, &req->req);

                spin_lock(&dev->lock);
        }
        ep->callback = 0;
}

static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
        struct gr_request *req;

        req = kzalloc(sizeof(*req), gfp_flags);
        if (!req)
                return NULL;

        INIT_LIST_HEAD(&req->queue);

        return &req->req;
}

/*
 * Starts DMA for endpoint ep if there are requests in the queue.
 *
 * Must be called with dev->lock held and with !ep->stopped.
 */
static void gr_start_dma(struct gr_ep *ep)
{
        struct gr_request *req;
        u32 dmactrl;

        if (list_empty(&ep->queue)) {
                ep->dma_start = 0;
                return;
        }

        req = list_first_entry(&ep->queue, struct gr_request, queue);

        /* A descriptor should already have been allocated */
        BUG_ON(!req->curr_desc);

        /*
         * The DMA controller can not handle smaller OUT buffers than
         * ep->ep.maxpacket. It could lead to buffer overruns if an
         * unexpectedly long packet is received. Therefore an internal bounce
         * buffer gets used when such a request gets enabled.
         */
        if (!ep->is_in && req->oddlen)
                req->last_desc->data = ep->tailbuf_paddr;

        wmb(); /* Make sure all is settled before handing it over to DMA */

        /* Set the descriptor pointer in the hardware */
        gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

        /* Announce available descriptors */
        dmactrl = gr_read32(&ep->regs->dmactrl);
        gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

        ep->dma_start = 1;
}
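
/*
 * Note on the ordering in gr_start_dma() above: the wmb() orders the
 * descriptor writes against the GR_DMACTRL_DA write. Once DA is set the core
 * may fetch descriptors autonomously, so they must be fully settled in memory
 * first.
 */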

/*
 * Finishes the first request in the ep's queue and, if available, starts the
 * next request in queue.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static void gr_dma_advance(struct gr_ep *ep, int status)
{
        struct gr_request *req;

        req = list_first_entry(&ep->queue, struct gr_request, queue);
        gr_finish_request(ep, req, status);
        gr_start_dma(ep); /* Regardless of ep->dma_start */
}

/*
 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
 * transfer to be canceled and clears GR_DMACTRL_DA.
 *
 * Must be called with dev->lock held.
 */
static void gr_abort_dma(struct gr_ep *ep)
{
        u32 dmactrl;

        dmactrl = gr_read32(&ep->regs->dmactrl);
        gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
}

/*
 * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
 * chain.
 *
 * Size is not used for OUT endpoints. Hardware can not be instructed to handle
 * a buffer smaller than MAXPL in the OUT direction.
 */
static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
                           dma_addr_t data, unsigned size, gfp_t gfp_flags)
{
        struct gr_dma_desc *desc;

        desc = gr_alloc_dma_desc(ep, gfp_flags);
        if (!desc)
                return -ENOMEM;

        desc->data = data;
        if (ep->is_in)
                desc->ctrl =
                        (GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
        else
                desc->ctrl = GR_DESC_OUT_CTRL_IE;

        if (!req->first_desc) {
                req->first_desc = desc;
                req->curr_desc = desc;
        } else {
                req->last_desc->next_desc = desc;
                req->last_desc->next = desc->paddr;
                req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
        }
        req->last_desc = desc;

        return 0;
}

/*
 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
                                  gfp_t gfp_flags)
{
        u16 bytes_left; /* Bytes left to provide descriptors for */
        u16 bytes_used; /* Bytes accommodated for */
        int ret = 0;

        req->first_desc = NULL; /* Signals that no allocation is done yet */
        bytes_left = req->req.length;
        bytes_used = 0;
        while (bytes_left > 0) {
                dma_addr_t start = req->req.dma + bytes_used;
                u16 size = min(bytes_left, ep->bytes_per_buffer);

                if (size < ep->bytes_per_buffer) {
                        /* Prepare using bounce buffer */
                        req->evenlen = req->req.length - bytes_left;
                        req->oddlen = size;
                }

                ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
                if (ret)
                        goto alloc_err;

                bytes_left -= size;
                bytes_used += size;
        }

        req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

        return 0;

alloc_err:
        gr_free_dma_desc_chain(ep->dev, req);

        return ret;
}
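
/*
 * Illustrative scenario (values assumed): an OUT request of 700 bytes on an
 * endpoint with bytes_per_buffer == 512 gets evenlen == 512 and
 * oddlen == 188. gr_start_dma() then points the last descriptor at the
 * tailbuf bounce buffer, and gr_finish_request() copies the trailing 188
 * bytes back into the caller's buffer once the transfer completes.
 */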

/*
 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware splits
 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
 * is always set to a multiple of the maximum payload (restricted to the valid
 * number of maximum payloads during high bandwidth isochronous or interrupt
 * transfers).
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one indicating that the entire request has been pushed
 * to hardware.
 */
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
                                 gfp_t gfp_flags)
{
        u16 bytes_left; /* Bytes left in req to provide descriptors for */
        u16 bytes_used; /* Bytes in req accommodated for */
        int ret = 0;

        req->first_desc = NULL; /* Signals that no allocation is done yet */
        bytes_left = req->req.length;
        bytes_used = 0;
        do { /* Allow for zero length packets */
                dma_addr_t start = req->req.dma + bytes_used;
                u16 size = min(bytes_left, ep->bytes_per_buffer);

                ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
                if (ret)
                        goto alloc_err;

                bytes_left -= size;
                bytes_used += size;
        } while (bytes_left > 0);

        /*
         * Send an extra zero length packet to indicate that no more data is
         * available when req->req.zero is set and the data length is an even
         * multiple of ep->ep.maxpacket.
         */
        if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
                ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
                if (ret)
                        goto alloc_err;
        }

        /*
         * For IN packets we only want to know when the last packet has been
         * transmitted (not just put into internal buffers).
         */
        req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

        return 0;

alloc_err:
        gr_free_dma_desc_chain(ep->dev, req);

        return ret;
}
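
/*
 * Illustrative example (values assumed): queueing a 1024 byte IN request with
 * req.zero set, on an endpoint where bytes_per_buffer == maxpacket == 512,
 * produces two 512 byte descriptors plus one zero-length descriptor. Only the
 * final descriptor carries GR_DESC_IN_CTRL_PI, so a single interrupt fires
 * once everything has actually been transmitted.
 */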

/* Must be called with dev->lock held */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
        struct gr_udc *dev = ep->dev;
        int ret;

        if (unlikely(!ep->ep.desc && ep->num != 0)) {
                dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
                return -EINVAL;
        }

        if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
                dev_err(dev->dev,
                        "Invalid request for %s: buf=%p list_empty=%d\n",
                        ep->ep.name, req->req.buf, list_empty(&req->queue));
                return -EINVAL;
        }

        if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
                dev_err(dev->dev, "-ESHUTDOWN");
                return -ESHUTDOWN;
        }

        /* Can't touch registers when suspended */
        if (dev->ep0state == GR_EP0_SUSPEND) {
                dev_err(dev->dev, "-EBUSY");
                return -EBUSY;
        }

        /* Set up DMA mapping in case the caller didn't */
        ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
        if (ret) {
                dev_err(dev->dev, "usb_gadget_map_request");
                return ret;
        }

        if (ep->is_in)
                ret = gr_setup_in_desc_list(ep, req, gfp_flags);
        else
                ret = gr_setup_out_desc_list(ep, req, gfp_flags);
        if (ret)
                return ret;

        req->req.status = -EINPROGRESS;
        req->req.actual = 0;
        list_add_tail(&req->queue, &ep->queue);

        /* Start DMA if not started, otherwise interrupt handler handles it */
        if (!ep->dma_start && likely(!ep->stopped))
                gr_start_dma(ep);

        return 0;
}

/*
 * Queue a request from within the driver.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
                               gfp_t gfp_flags)
{
        if (ep->is_in)
                gr_dbgprint_request("RESP", ep, req);

        return gr_queue(ep, req, gfp_flags);
}

/* ---------------------------------------------------------------------- */
/* General helper functions */

/*
 * Dequeue ALL requests.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep_nuke(struct gr_ep *ep)
{
        struct gr_request *req;

        ep->stopped = 1;
        ep->dma_start = 0;
        gr_abort_dma(ep);

        while (!list_empty(&ep->queue)) {
                req = list_first_entry(&ep->queue, struct gr_request, queue);
                gr_finish_request(ep, req, -ESHUTDOWN);
        }
}

/*
 * Reset the hardware state of this endpoint.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep_reset(struct gr_ep *ep)
{
        gr_write32(&ep->regs->epctrl, 0);
        gr_write32(&ep->regs->dmactrl, 0);

        ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
        ep->ep.desc = NULL;
        ep->stopped = 1;
        ep->dma_start = 0;
}

/*
 * Generate STALL on ep0in/out.
 *
 * Must be called with dev->lock held.
 */
static void gr_control_stall(struct gr_udc *dev)
{
        u32 epctrl;

        epctrl = gr_read32(&dev->epo[0].regs->epctrl);
        gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
        epctrl = gr_read32(&dev->epi[0].regs->epctrl);
        gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);

        dev->ep0state = GR_EP0_STALL;
}

/*
 * Halts, halts and wedges, or clears halt for an endpoint.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
{
        u32 epctrl;
        int retval = 0;

        if (ep->num && !ep->ep.desc)
                return -EINVAL;

        if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
                return -EOPNOTSUPP;

        /* Never actually halt ep0, and therefore never clear halt for ep0 */
        if (!ep->num) {
                if (halt && !fromhost) {
                        /* ep0 halt from gadget - generate protocol stall */
                        gr_control_stall(ep->dev);
                        dev_dbg(ep->dev->dev, "EP: stall ep0\n");
                        return 0;
                }
                return -EINVAL;
        }

        dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
                (halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);

        epctrl = gr_read32(&ep->regs->epctrl);
        if (halt) {
                /* Set HALT */
                gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
                ep->stopped = 1;
                if (wedge)
                        ep->wedged = 1;
        } else {
                gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
                ep->stopped = 0;
                ep->wedged = 0;

                /* Things might have been queued up in the meantime */
                if (!ep->dma_start)
                        gr_start_dma(ep);
        }

        return retval;
}

/* Must be called with dev->lock held */
static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
{
        if (dev->ep0state != value)
                dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
                         gr_ep0state_string(value));
        dev->ep0state = value;
}

/*
 * Should only be called when endpoints can not generate interrupts.
 *
 * Must be called with dev->lock held.
 */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
        gr_write32(&dev->regs->control, 0);
        wmb(); /* Make sure that we do not deny one of our interrupts */
        dev->irq_enabled = 0;
}

/*
 * Stop all device activity and disable data line pullup.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_stop_activity(struct gr_udc *dev)
{
        struct gr_ep *ep;

        list_for_each_entry(ep, &dev->ep_list, ep_list)
                gr_ep_nuke(ep);

        gr_disable_interrupts_and_pullup(dev);

        gr_set_ep0state(dev, GR_EP0_DISCONNECT);
        usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
}

/* ---------------------------------------------------------------------- */
/* ep0 setup packet handling */

static void gr_ep0_testmode_complete(struct usb_ep *_ep,
                                     struct usb_request *_req)
{
        struct gr_ep *ep;
        struct gr_udc *dev;
        u32 control;

        ep = container_of(_ep, struct gr_ep, ep);
        dev = ep->dev;

        spin_lock(&dev->lock);

        control = gr_read32(&dev->regs->control);
        control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
        gr_write32(&dev->regs->control, control);

        spin_unlock(&dev->lock);
}

static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
{
        /* Nothing needs to be done here */
}

/*
 * Queue a response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
                          void (*complete)(struct usb_ep *ep,
                                           struct usb_request *req))
{
        u8 *reqbuf = dev->ep0reqi->req.buf;
        int status;
        int i;

        for (i = 0; i < length; i++)
                reqbuf[i] = buf[i];
        dev->ep0reqi->req.length = length;
        dev->ep0reqi->req.complete = complete;

        status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
        if (status < 0)
                dev_err(dev->dev,
                        "Could not queue ep0in setup response: %d\n", status);

        return status;
}

/*
 * Queue a 2 byte response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
{
        __le16 le_response = cpu_to_le16(response);

        return gr_ep0_respond(dev, (u8 *)&le_response, 2,
                              gr_ep0_dummy_complete);
}
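
/*
 * For example, a GET_STATUS reply of 0x0001 (self powered) goes on the wire
 * as the little-endian byte sequence 0x01 0x00, which the cpu_to_le16()
 * conversion above guarantees regardless of CPU endianness. This matters here
 * because GRUSBDC cores typically sit in big-endian LEON/SPARC systems, hence
 * also the ioread32be/iowrite32be register accessors used by this driver.
 */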

/*
 * Queue a ZLP response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_empty(struct gr_udc *dev)
{
        return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
}

/*
 * This is run when a SET_ADDRESS request is received. First writes the new
 * address to the control register, which is updated internally when the next
 * IN packet is ACKED.
 *
 * Must be called with dev->lock held.
 */
static void gr_set_address(struct gr_udc *dev, u8 address)
{
        u32 control;

        control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
        control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
        control |= GR_CONTROL_SU;
        gr_write32(&dev->regs->control, control);
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
                             u16 value, u16 index)
{
        u16 response;
        u8 test;

        switch (request) {
        case USB_REQ_SET_ADDRESS:
                dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
                gr_set_address(dev, value & 0xff);
                if (value)
                        usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
                else
                        usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
                return gr_ep0_respond_empty(dev);

        case USB_REQ_GET_STATUS:
                /* Self powered | remote wakeup */
                response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
                return gr_ep0_respond_u16(dev, response);

        case USB_REQ_SET_FEATURE:
                switch (value) {
                case USB_DEVICE_REMOTE_WAKEUP:
                        /* Allow remote wakeup */
                        dev->remote_wakeup = 1;
                        return gr_ep0_respond_empty(dev);

                case USB_DEVICE_TEST_MODE:
                        /* The hardware does not support TEST_FORCE_EN */
                        test = index >> 8;
                        if (test >= TEST_J && test <= TEST_PACKET) {
                                dev->test_mode = test;
                                return gr_ep0_respond(dev, NULL, 0,
                                                      gr_ep0_testmode_complete);
                        }
                }
                break;

        case USB_REQ_CLEAR_FEATURE:
                switch (value) {
                case USB_DEVICE_REMOTE_WAKEUP:
                        /* Disallow remote wakeup */
                        dev->remote_wakeup = 0;
                        return gr_ep0_respond_empty(dev);
                }
                break;
        }

        return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
                                u16 value, u16 index)
{
        if (dev->gadget.state != USB_STATE_CONFIGURED)
                return -1;

        /*
         * Should return STALL for invalid interfaces, but the UDC driver does
         * not know anything about that. However, many gadget drivers do not
         * handle GET_STATUS, so we need to take care of that.
         */
        switch (request) {
        case USB_REQ_GET_STATUS:
                return gr_ep0_respond_u16(dev, 0x0000);

        case USB_REQ_SET_FEATURE:
        case USB_REQ_CLEAR_FEATURE:
                /*
                 * No possible valid standard requests. Still let gadget drivers
                 * have a go at it.
                 */
                break;
        }

        return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
                               u16 value, u16 index)
{
        struct gr_ep *ep;
        int status;
        int halted;
        u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
        u8 is_in = index & USB_ENDPOINT_DIR_MASK;

        if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
                return -1;

        if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
                return -1;

        ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

        switch (request) {
        case USB_REQ_GET_STATUS:
                halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
                return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);

        case USB_REQ_SET_FEATURE:
                switch (value) {
                case USB_ENDPOINT_HALT:
                        status = gr_ep_halt_wedge(ep, 1, 0, 1);
                        if (status >= 0)
                                status = gr_ep0_respond_empty(dev);
                        return status;
                }
                break;

        case USB_REQ_CLEAR_FEATURE:
                switch (value) {
                case USB_ENDPOINT_HALT:
                        if (ep->wedged)
                                return -1;
                        status = gr_ep_halt_wedge(ep, 0, 0, 1);
                        if (status >= 0)
                                status = gr_ep0_respond_empty(dev);
                        return status;
                }
                break;
        }

        return 1; /* Delegate the rest */
}

/* Must be called with dev->lock held */
static void gr_ep0out_requeue(struct gr_udc *dev)
{
        int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);

        if (ret)
                dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
                        ret);
}

/*
 * The main function dealing with setup requests on ep0.
 *
 * Must be called with dev->lock held and irqs disabled
 */
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
        __releases(&dev->lock)
        __acquires(&dev->lock)
{
        union {
                struct usb_ctrlrequest ctrl;
                u8 raw[8];
                u32 word[2];
        } u;
        u8 type;
        u8 request;
        u16 value;
        u16 index;
        u16 length;
        int i;
        int status;

        /* Restore from ep0 halt */
        if (dev->ep0state == GR_EP0_STALL) {
                gr_set_ep0state(dev, GR_EP0_SETUP);
                if (!req->req.actual)
                        goto out;
        }

        if (dev->ep0state == GR_EP0_ISTATUS) {
                gr_set_ep0state(dev, GR_EP0_SETUP);
                if (req->req.actual > 0)
                        dev_dbg(dev->dev,
                                "Unexpected setup packet at state %s\n",
                                gr_ep0state_string(GR_EP0_ISTATUS));
                else
                        goto out; /* Got expected ZLP */
        } else if (dev->ep0state != GR_EP0_SETUP) {
                dev_info(dev->dev,
                         "Unexpected ep0out request at state %s - stalling\n",
                         gr_ep0state_string(dev->ep0state));
                gr_control_stall(dev);
                gr_set_ep0state(dev, GR_EP0_SETUP);
                goto out;
        } else if (!req->req.actual) {
                dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
                        gr_ep0state_string(dev->ep0state));
                goto out;
        }

        /* Handle SETUP packet */
        for (i = 0; i < req->req.actual; i++)
                u.raw[i] = ((u8 *)req->req.buf)[i];

        type = u.ctrl.bRequestType;
        request = u.ctrl.bRequest;
        value = le16_to_cpu(u.ctrl.wValue);
        index = le16_to_cpu(u.ctrl.wIndex);
        length = le16_to_cpu(u.ctrl.wLength);

        gr_dbgprint_devreq(dev, type, request, value, index, length);

        /* Check for data stage */
        if (length) {
                if (type & USB_DIR_IN)
                        gr_set_ep0state(dev, GR_EP0_IDATA);
                else
                        gr_set_ep0state(dev, GR_EP0_ODATA);
        }

        status = 1; /* Positive status flags delegation */
        if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
                switch (type & USB_RECIP_MASK) {
                case USB_RECIP_DEVICE:
                        status = gr_device_request(dev, type, request,
                                                   value, index);
                        break;
                case USB_RECIP_ENDPOINT:
                        status = gr_endpoint_request(dev, type, request,
                                                     value, index);
                        break;
                case USB_RECIP_INTERFACE:
                        status = gr_interface_request(dev, type, request,
                                                      value, index);
                        break;
                }
        }

        if (status > 0) {
                spin_unlock(&dev->lock);

                dev_vdbg(dev->dev, "DELEGATE\n");
                status = dev->driver->setup(&dev->gadget, &u.ctrl);

                spin_lock(&dev->lock);
        }

        /* Generate STALL on both ep0out and ep0in if requested */
        if (unlikely(status < 0)) {
                dev_vdbg(dev->dev, "STALL\n");
                gr_control_stall(dev);
        }

        if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
            request == USB_REQ_SET_CONFIGURATION) {
                if (!value) {
                        dev_dbg(dev->dev, "STATUS: deconfigured\n");
                        usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
                } else if (status >= 0) {
                        /* Not configured unless gadget OKs it */
                        dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
                        usb_gadget_set_state(&dev->gadget,
                                             USB_STATE_CONFIGURED);
                }
        }

        /* Get ready for next stage */
        if (dev->ep0state == GR_EP0_ODATA)
                gr_set_ep0state(dev, GR_EP0_OSTATUS);
        else if (dev->ep0state == GR_EP0_IDATA)
                gr_set_ep0state(dev, GR_EP0_ISTATUS);
        else
                gr_set_ep0state(dev, GR_EP0_SETUP);

out:
        gr_ep0out_requeue(dev);
}
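
/*
 * Summary of the ep0 state transitions driven by gr_ep0_setup() above:
 *
 *   SETUP --(wLength > 0, IN)--> IDATA --> ISTATUS --> SETUP
 *   SETUP --(wLength > 0, OUT)-> ODATA --> OSTATUS --> SETUP
 *   SETUP --(wLength == 0)----------------------------> SETUP
 *
 * GR_EP0_STALL returns to SETUP on the next setup packet, and any other
 * unexpected state is resolved by stalling both ep0 directions.
 */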

/* ---------------------------------------------------------------------- */
/* VBUS and USB reset handling */

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_connected(struct gr_udc *dev, u32 status)
{
        u32 control;

        dev->gadget.speed = GR_SPEED(status);
        usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);

        /* Turn on full interrupts and pullup */
        control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
                   GR_CONTROL_SP | GR_CONTROL_EP);
        gr_write32(&dev->regs->control, control);
}

/* Must be called with dev->lock held */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
        u32 status;

        dev->irq_enabled = 1;
        wmb(); /* Make sure we do not ignore an interrupt */
        gr_write32(&dev->regs->control, GR_CONTROL_VI);

        /* Take care of the case we are already plugged in at this point */
        status = gr_read32(&dev->regs->status);
        if (status & GR_STATUS_VB)
                gr_vbus_connected(dev, status);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
        gr_stop_activity(dev);

        /* Report disconnect */
        if (dev->driver && dev->driver->disconnect) {
                spin_unlock(&dev->lock);

                dev->driver->disconnect(&dev->gadget);

                spin_lock(&dev->lock);
        }

        gr_enable_vbus_detect(dev);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
        gr_set_address(dev, 0);
        gr_set_ep0state(dev, GR_EP0_SETUP);
        usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
        dev->gadget.speed = GR_SPEED(status);

        gr_ep_nuke(&dev->epo[0]);
        gr_ep_nuke(&dev->epi[0]);
        dev->epo[0].stopped = 0;
        dev->epi[0].stopped = 0;
        gr_ep0out_requeue(dev);
}

/* ---------------------------------------------------------------------- */
/* Irq handling */

/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
        struct gr_request *req;

        req = list_first_entry(&ep->queue, struct gr_request, queue);
        if (!req->last_desc)
                return 0;

        if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
                return 0; /* Not put in hardware buffers yet */

        if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
                return 0; /* Not transmitted yet, still in hardware buffers */

        /* Write complete */
        gr_dma_advance(ep, 0);

        return 1;
}

/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
        u32 ep_dmactrl;
        u32 ctrl;
        u16 len;
        struct gr_request *req;
        struct gr_udc *dev = ep->dev;

        req = list_first_entry(&ep->queue, struct gr_request, queue);
        if (!req->curr_desc)
                return 0;

        ctrl = READ_ONCE(req->curr_desc->ctrl);
        if (ctrl & GR_DESC_OUT_CTRL_EN)
                return 0; /* Not received yet */

        /* Read complete */
        len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
        req->req.actual += len;
        if (ctrl & GR_DESC_OUT_CTRL_SE)
                req->setup = 1;

        if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
                /* Short packet or >= expected size - we are done */

                if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
                        /*
                         * Send a status stage ZLP to ack the DATA stage in the
                         * OUT direction. This needs to be done before
                         * gr_dma_advance as that can lead to a call to
                         * ep0_setup that can change dev->ep0state.
                         */
                        gr_ep0_respond_empty(dev);
                        gr_set_ep0state(dev, GR_EP0_SETUP);
                }

                gr_dma_advance(ep, 0);
        } else {
                /* Not done yet. Enable the next descriptor to receive more. */
                req->curr_desc = req->curr_desc->next_desc;
                req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

                ep_dmactrl = gr_read32(&ep->regs->dmactrl);
                gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
        }

        return 1;
}
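
/*
 * Example of the completion rule above (values assumed): with maxpacket 512,
 * a full 512 byte packet into a 1024 byte request leaves actual == 512 and
 * merely enables the next descriptor, while a subsequent 100 byte (short)
 * packet completes the request immediately with actual == 612.
 */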

/*
 * Handle state changes. Returns whether something was handled.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
        u32 status = gr_read32(&dev->regs->status);
        int handled = 0;
        int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
                         dev->gadget.state == USB_STATE_ATTACHED);

        /* VBUS valid detected */
        if (!powstate && (status & GR_STATUS_VB)) {
                dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
                gr_vbus_connected(dev, status);
                handled = 1;
        }

        /* Disconnect */
        if (powstate && !(status & GR_STATUS_VB)) {
                dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
                gr_vbus_disconnected(dev);
                handled = 1;
        }

        /* USB reset detected */
        if (status & GR_STATUS_UR) {
                dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
                        GR_SPEED_STR(status));
                gr_write32(&dev->regs->status, GR_STATUS_UR);
                gr_udc_usbreset(dev, status);
                handled = 1;
        }

        /* Speed change */
        if (dev->gadget.speed != GR_SPEED(status)) {
                dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
                        GR_SPEED_STR(status));
                dev->gadget.speed = GR_SPEED(status);
                handled = 1;
        }

        /* Going into suspend */
        if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
                dev_dbg(dev->dev, "STATUS: USB suspend\n");
                gr_set_ep0state(dev, GR_EP0_SUSPEND);
                dev->suspended_from = dev->gadget.state;
                usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

                if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
                    dev->driver && dev->driver->suspend) {
                        spin_unlock(&dev->lock);

                        dev->driver->suspend(&dev->gadget);

                        spin_lock(&dev->lock);
                }
                handled = 1;
        }

        /* Coming out of suspend */
        if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
                dev_dbg(dev->dev, "STATUS: USB resume\n");
                if (dev->suspended_from == USB_STATE_POWERED)
                        gr_set_ep0state(dev, GR_EP0_DISCONNECT);
                else
                        gr_set_ep0state(dev, GR_EP0_SETUP);
                usb_gadget_set_state(&dev->gadget, dev->suspended_from);

                if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
                    dev->driver && dev->driver->resume) {
                        spin_unlock(&dev->lock);

                        dev->driver->resume(&dev->gadget);

                        spin_lock(&dev->lock);
                }
                handled = 1;
        }

        return handled;
}

/* Non-interrupt context irq handler */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
        struct gr_udc *dev = _dev;
        struct gr_ep *ep;
        int handled = 0;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);

        if (!dev->irq_enabled)
                goto out;

        /*
         * Check IN ep interrupts. We check these before the OUT eps because
         * some gadgets reuse a request that might already be outstanding and
         * needs to be completed (mainly setup requests).
         */
        for (i = 0; i < dev->nepi; i++) {
                ep = &dev->epi[i];
                if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
                        handled = gr_handle_in_ep(ep) || handled;
        }

        /* Check OUT ep interrupts */
        for (i = 0; i < dev->nepo; i++) {
                ep = &dev->epo[i];
                if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
                        handled = gr_handle_out_ep(ep) || handled;
        }

        /* Check status interrupts */
        handled = gr_handle_state_changes(dev) || handled;

        /*
         * Check AMBA DMA errors. Only check if we didn't find anything else to
         * handle because this shouldn't happen if we did everything right.
         */
        if (!handled) {
                list_for_each_entry(ep, &dev->ep_list, ep_list) {
                        if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
                                dev_err(dev->dev,
                                        "AMBA Error occurred for %s\n",
                                        ep->ep.name);
                                handled = 1;
                        }
                }
        }

out:
        spin_unlock_irqrestore(&dev->lock, flags);

        return handled ? IRQ_HANDLED : IRQ_NONE;
}

/* Interrupt context irq handler */
static irqreturn_t gr_irq(int irq, void *_dev)
{
        struct gr_udc *dev = _dev;

        if (!dev->irq_enabled)
                return IRQ_NONE;

        return IRQ_WAKE_THREAD;
}
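
/*
 * gr_irq() is the hard (interrupt context) half: it only checks that the
 * driver has interrupts enabled and defers all actual work to the threaded
 * handler gr_irq_handler() by returning IRQ_WAKE_THREAD. The pairing is
 * presumably set up with request_threaded_irq() in the probe code, which is
 * not part of this excerpt.
 */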

/* ---------------------------------------------------------------------- */
/* USB ep ops */

/* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_enable(struct usb_ep *_ep,
                        const struct usb_endpoint_descriptor *desc)
{
        struct gr_udc *dev;
        struct gr_ep *ep;
        u8 mode;
        u8 nt;
        u16 max;
        u16 buffer_size = 0;
        u32 epctrl;

        ep = container_of(_ep, struct gr_ep, ep);
        if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;

        dev = ep->dev;

        /* 'ep0' IN and OUT are reserved */
        if (ep == &dev->epo[0] || ep == &dev->epi[0])
                return -EINVAL;

        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        /* Make sure we are clear for enabling */
        epctrl = gr_read32(&ep->regs->epctrl);
        if (epctrl & GR_EPCTRL_EV)
                return -EBUSY;

        /* Check that directions match */
        if (!ep->is_in != !usb_endpoint_dir_in(desc))
                return -EINVAL;

        /* Check ep num */
        if ((!ep->is_in && ep->num >= dev->nepo) ||
            (ep->is_in && ep->num >= dev->nepi))
                return -EINVAL;

        if (usb_endpoint_xfer_control(desc)) {
                mode = 0;
        } else if (usb_endpoint_xfer_isoc(desc)) {
                mode = 1;
        } else if (usb_endpoint_xfer_bulk(desc)) {
                mode = 2;
        } else if (usb_endpoint_xfer_int(desc)) {
                mode = 3;
        } else {
                dev_err(dev->dev, "Unknown transfer type for %s\n",
                        ep->ep.name);
                return -EINVAL;
        }

        /*
         * Bits 10-0 set the max payload. 12-11 set the number of
         * additional transactions.
         */
        max = usb_endpoint_maxp(desc);
        nt = usb_endpoint_maxp_mult(desc) - 1;
        buffer_size = GR_BUFFER_SIZE(epctrl);
        if (nt && (mode == 0 || mode == 2)) {
                dev_err(dev->dev,
                        "%s mode: multiple trans./microframe not valid\n",
                        (mode == 2 ? "Bulk" : "Control"));
                return -EINVAL;
        } else if (nt == 0x3) {
                dev_err(dev->dev,
                        "Invalid value 0x3 for additional trans./microframe\n");
                return -EINVAL;
        } else if ((nt + 1) * max > buffer_size) {
                dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
                        buffer_size, (nt + 1), max);
                return -EINVAL;
        } else if (max == 0) {
                dev_err(dev->dev, "Max payload cannot be set to 0\n");
                return -EINVAL;
        } else if (max > ep->ep.maxpacket_limit) {
                dev_err(dev->dev, "Requested max payload %d > limit %d\n",
                        max, ep->ep.maxpacket_limit);
                return -EINVAL;
        }

        spin_lock(&ep->dev->lock);

        if (!ep->stopped) {
                spin_unlock(&ep->dev->lock);
                return -EBUSY;
        }

        ep->stopped = 0;
        ep->wedged = 0;
        ep->ep.desc = desc;
        ep->ep.maxpacket = max;
        ep->dma_start = 0;

        if (nt) {
                /*
                 * Maximum possible size of all payloads in one microframe
                 * regardless of direction when using high-bandwidth mode.
                 */
                ep->bytes_per_buffer = (nt + 1) * max;
        } else if (ep->is_in) {
                /*
                 * The biggest multiple of maximum packet size that fits into
                 * the buffer. The hardware will split up into many packets in
                 * the IN direction.
                 */
                ep->bytes_per_buffer = (buffer_size / max) * max;
        } else {
                /*
                 * Only single packets will be placed in the buffers in the OUT
                 * direction.
                 */
                ep->bytes_per_buffer = max;
        }

        epctrl = (max << GR_EPCTRL_MAXPL_POS)
                | (nt << GR_EPCTRL_NT_POS)
                | (mode << GR_EPCTRL_TT_POS)
                | GR_EPCTRL_EV;
        if (ep->is_in)
                epctrl |= GR_EPCTRL_PI;
        gr_write32(&ep->regs->epctrl, epctrl);

        gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);

        spin_unlock(&ep->dev->lock);

        dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
                ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
        return 0;
}
  1334. /* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
  1335. static int gr_ep_disable(struct usb_ep *_ep)
  1336. {
  1337. struct gr_ep *ep;
  1338. struct gr_udc *dev;
  1339. unsigned long flags;
  1340. ep = container_of(_ep, struct gr_ep, ep);
  1341. if (!_ep || !ep->ep.desc)
  1342. return -ENODEV;
  1343. dev = ep->dev;
  1344. /* 'ep0' IN and OUT are reserved */
  1345. if (ep == &dev->epo[0] || ep == &dev->epi[0])
  1346. return -EINVAL;
  1347. if (dev->ep0state == GR_EP0_SUSPEND)
  1348. return -EBUSY;
  1349. dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);
  1350. spin_lock_irqsave(&dev->lock, flags);
  1351. gr_ep_nuke(ep);
  1352. gr_ep_reset(ep);
  1353. ep->ep.desc = NULL;
  1354. spin_unlock_irqrestore(&dev->lock, flags);
  1355. return 0;
  1356. }
/*
 * Frees a request, but not any DMA buffers associated with it
 * (gr_finish_request should already have taken care of that).
 */
static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;

	if (!_ep || !_req)
		return;
	req = container_of(_req, struct gr_request, req);

	/* Leads to memory leak */
	WARN(!list_empty(&req->queue),
	     "request not dequeued properly before freeing\n");

	kfree(req);
}

/* Queue a request from the gadget */
static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct gr_udc *dev;
	int ret;

	if (unlikely(!_ep || !_req))
		return -EINVAL;

	ep = container_of(_ep, struct gr_ep, ep);
	req = container_of(_req, struct gr_request, req);
	dev = ep->dev;

	spin_lock(&ep->dev->lock);

	/*
	 * The ep0 pointer in the gadget struct is used both for ep0in and
	 * ep0out. In a data stage in the out direction ep0out needs to be used
	 * instead of the default ep0in. Completion functions might use
	 * driver_data, so that needs to be copied as well.
	 */
	if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
		ep = &dev->epo[0];
		ep->ep.driver_data = dev->epi[0].ep.driver_data;
	}

	if (ep->is_in)
		gr_dbgprint_request("EXTERN", ep, req);

	ret = gr_queue(ep, req, GFP_ATOMIC);

	spin_unlock(&ep->dev->lock);

	return ret;
}
/* Dequeue JUST ONE request */
static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;
	struct gr_ep *ep;
	struct gr_udc *dev;
	int ret = 0;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* We can't touch (DMA) registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	spin_lock_irqsave(&dev->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}
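
	/*
	 * Three cases: the request is at the head of the queue and may have
	 * DMA in flight, it is queued further back, or it has already been
	 * taken off the queue.
	 */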
	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
		/* This request is currently being processed */
		gr_abort_dma(ep);
		if (ep->stopped)
			gr_finish_request(ep, req, -ECONNRESET);
		else
			gr_dma_advance(ep, -ECONNRESET);
	} else if (!list_empty(&req->queue)) {
		/* Not being processed - gr_finish_request dequeues it */
		gr_finish_request(ep, req, -ECONNRESET);
	} else {
		ret = -EOPNOTSUPP;
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}
/* Helper for gr_set_halt and gr_set_wedge */
static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	int ret;
	struct gr_ep *ep;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	spin_lock(&ep->dev->lock);

	/* Halting an IN endpoint should fail if queue is not empty */
	if (halt && ep->is_in && !list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = gr_ep_halt_wedge(ep, halt, wedge, 0);

out:
	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Halt endpoint */
static int gr_set_halt(struct usb_ep *_ep, int halt)
{
	return gr_set_halt_wedge(_ep, halt, 0);
}

/* Halt and wedge endpoint */
static int gr_set_wedge(struct usb_ep *_ep)
{
	return gr_set_halt_wedge(_ep, 1, 1);
}

/*
 * Return the total number of bytes currently stored in the internal buffers of
 * the endpoint.
 */
static int gr_fifo_status(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epstat;
	u32 bytes = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	epstat = gr_read32(&ep->regs->epstat);
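
	/*
	 * Each endpoint has two hardware buffers; B0/B1 indicate which ones
	 * currently hold valid data and the CNT fields give their fill level.
	 */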
	if (epstat & GR_EPSTAT_B0)
		bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
	if (epstat & GR_EPSTAT_B1)
		bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;

	return bytes;
}
/* Empty data from internal buffers of an endpoint. */
static void gr_fifo_flush(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epctrl;

	if (!_ep)
		return;
	ep = container_of(_ep, struct gr_ep, ep);
	dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);

	spin_lock(&ep->dev->lock);
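
	/* Setting the CB bit requests the core to discard any buffered data */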
	epctrl = gr_read32(&ep->regs->epctrl);
	epctrl |= GR_EPCTRL_CB;
	gr_write32(&ep->regs->epctrl, epctrl);

	spin_unlock(&ep->dev->lock);
}

static const struct usb_ep_ops gr_ep_ops = {
	.enable		= gr_ep_enable,
	.disable	= gr_ep_disable,

	.alloc_request	= gr_alloc_request,
	.free_request	= gr_free_request,

	.queue		= gr_queue_ext,
	.dequeue	= gr_dequeue,

	.set_halt	= gr_set_halt,
	.set_wedge	= gr_set_wedge,
	.fifo_status	= gr_fifo_status,
	.fifo_flush	= gr_fifo_flush,
};

/* ---------------------------------------------------------------------- */
/* USB Gadget ops */

static int gr_get_frame(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);
	return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
}

static int gr_wakeup(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	/* Remote wakeup feature not enabled by host */
	if (!dev->remote_wakeup)
		return -EINVAL;

	spin_lock(&dev->lock);

	gr_write32(&dev->regs->control,
		   gr_read32(&dev->regs->control) | GR_CONTROL_RW);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct gr_udc *dev;
	u32 control;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	if (is_on)
		control |= GR_CONTROL_EP;
	else
		control &= ~GR_CONTROL_EP;
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);

	spin_lock(&dev->lock);

	/* Hook up the driver */
	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Get ready for host detection */
	gr_enable_vbus_detect(dev);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_stop(struct usb_gadget *gadget)
{
	struct gr_udc *dev = to_gr_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->driver = NULL;
	gr_stop_activity(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static const struct usb_gadget_ops gr_ops = {
	.get_frame	= gr_get_frame,
	.wakeup		= gr_wakeup,
	.pullup		= gr_pullup,
	.udc_start	= gr_udc_start,
	.udc_stop	= gr_udc_stop,
	/* Other operations not supported */
};

/* ---------------------------------------------------------------------- */
/* Module probe, removal and of-matching */

static const char * const onames[] = {
	"ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
	"ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
	"ep12out", "ep13out", "ep14out", "ep15out"
};

static const char * const inames[] = {
	"ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
	"ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
	"ep12in", "ep13in", "ep14in", "ep15in"
};
/* Must be called with dev->lock held */
static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct usb_request *_req;
	void *buf;

	if (is_in) {
		ep = &dev->epi[num];
		ep->ep.name = inames[num];
		ep->regs = &dev->regs->epi[num];
	} else {
		ep = &dev->epo[num];
		ep->ep.name = onames[num];
		ep->regs = &dev->regs->epo[num];
	}

	gr_ep_reset(ep);
	ep->num = num;
	ep->is_in = is_in;
	ep->dev = dev;
	ep->ep.ops = &gr_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	if (num == 0) {
		_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
		buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
		if (!_req || !buf) {
			/* possible _req freed by gr_probe via gr_remove */
			return -ENOMEM;
		}

		req = container_of(_req, struct gr_request, req);
		req->req.buf = buf;
		req->req.length = MAX_CTRL_PL_SIZE;

		if (is_in)
			dev->ep0reqi = req; /* Complete gets set as used */
		else
			dev->ep0reqo = req; /* Completion treated separately */

		usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
		ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;

		ep->ep.caps.type_control = true;
	} else {
		usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);

		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;
	}
	list_add_tail(&ep->ep_list, &dev->ep_list);

	if (is_in)
		ep->ep.caps.dir_in = true;
	else
		ep->ep.caps.dir_out = true;
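
	/*
	 * Per-endpoint DMA bounce buffer, used elsewhere in this driver, e.g.
	 * for OUT transfers that do not end on a buffer boundary.
	 */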
	ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
					 &ep->tailbuf_paddr, GFP_ATOMIC);
	if (!ep->tailbuf)
		return -ENOMEM;

	return 0;
}
/* Must be called with dev->lock held */
static int gr_udc_init(struct gr_udc *dev)
{
	struct device_node *np = dev->dev->of_node;
	u32 epctrl_val;
	u32 dmactrl_val;
	int i;
	int ret = 0;
	u32 bufsize;

	gr_set_address(dev, 0);

	INIT_LIST_HEAD(&dev->gadget.ep_list);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->gadget.ep0 = &dev->epi[0].ep;

	INIT_LIST_HEAD(&dev->ep_list);
	gr_set_ep0state(dev, GR_EP0_DISCONNECT);
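
	/*
	 * Buffer sizes can be set per endpoint via the "epobufsizes" and
	 * "epibufsizes" device tree properties; 1024 bytes is the default.
	 */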
	for (i = 0; i < dev->nepo; i++) {
		if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 0, bufsize);
		if (ret)
			return ret;
	}

	for (i = 0; i < dev->nepi; i++) {
		if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 1, bufsize);
		if (ret)
			return ret;
	}

	/* Must be disabled by default */
	dev->remote_wakeup = 0;

	/* Enable ep0out and ep0in */
	epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
	dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
	gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
	gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
	gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);

	return 0;
}
static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
{
	struct gr_ep *ep;

	if (is_in)
		ep = &dev->epi[num];
	else
		ep = &dev->epo[num];

	if (ep->tailbuf)
		dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
				  ep->tailbuf, ep->tailbuf_paddr);
}

static int gr_remove(struct platform_device *pdev)
{
	struct gr_udc *dev = platform_get_drvdata(pdev);
	int i;

	if (dev->added)
		usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
	if (dev->driver)
		return -EBUSY;

	gr_dfs_delete(dev);
	dma_pool_destroy(dev->desc_pool);
	platform_set_drvdata(pdev, NULL);

	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
	gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);

	for (i = 0; i < dev->nepo; i++)
		gr_ep_remove(dev, i, 0);
	for (i = 0; i < dev->nepi; i++)
		gr_ep_remove(dev, i, 1);

	return 0;
}

static int gr_request_irq(struct gr_udc *dev, int irq)
{
	return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
					 IRQF_SHARED, driver_name, dev);
}

static int gr_probe(struct platform_device *pdev)
{
	struct gr_udc *dev;
	struct resource *res;
	struct gr_regs __iomem *regs;
	int retval;
	u32 status;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq <= 0) {
		dev_err(dev->dev, "No irq found\n");
		return -ENODEV;
	}

	/* Some core configurations have separate irqs for IN and OUT events */
	dev->irqi = platform_get_irq(pdev, 1);
	if (dev->irqi > 0) {
		dev->irqo = platform_get_irq(pdev, 2);
		if (dev->irqo <= 0) {
			dev_err(dev->dev, "Found irqi but not irqo\n");
			return -ENODEV;
		}
	} else {
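		/* irqi == 0 means a single combined interrupt line is used */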
		dev->irqi = 0;
	}
	dev->gadget.name = driver_name;
	dev->gadget.max_speed = USB_SPEED_HIGH;
	dev->gadget.ops = &gr_ops;

	spin_lock_init(&dev->lock);
	dev->regs = regs;

	platform_set_drvdata(pdev, dev);

	/* Determine number of endpoints and data interface mode */
	status = gr_read32(&dev->regs->status);
	dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
	dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;

	if (!(status & GR_STATUS_DM)) {
		dev_err(dev->dev, "Slave mode cores are not supported\n");
		return -ENODEV;
	}

	/* --- Effects of the following calls might need explicit cleanup --- */

	/* Create DMA pool for descriptors */
	dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
					 sizeof(struct gr_dma_desc), 4, 0);
	if (!dev->desc_pool) {
		dev_err(dev->dev, "Could not allocate DMA pool");
		return -ENOMEM;
	}

	spin_lock(&dev->lock);

	/* Inside lock so that no gadget can use this udc until probe is done */
	retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
	if (retval) {
		dev_err(dev->dev, "Could not add gadget udc");
		goto out;
	}
	dev->added = 1;

	retval = gr_udc_init(dev);
	if (retval)
		goto out;

	gr_dfs_create(dev);

	/* Clear all interrupt enables that might be left on since last boot */
	gr_disable_interrupts_and_pullup(dev);

	retval = gr_request_irq(dev, dev->irq);
	if (retval) {
		dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
		goto out;
	}

	if (dev->irqi) {
		retval = gr_request_irq(dev, dev->irqi);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqi %d\n",
				dev->irqi);
			goto out;
		}
		retval = gr_request_irq(dev, dev->irqo);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqo %d\n",
				dev->irqo);
			goto out;
		}
	}

	if (dev->irqi)
		dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
			 dev->irq, dev->irqi, dev->irqo);
	else
		dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);

out:
	spin_unlock(&dev->lock);

	if (retval)
		gr_remove(pdev);

	return retval;
}

static const struct of_device_id gr_match[] = {
	{.name = "GAISLER_USBDC"},
	{.name = "01_021"},
	{},
};
MODULE_DEVICE_TABLE(of, gr_match);

static struct platform_driver gr_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = gr_match,
	},
	.probe = gr_probe,
	.remove = gr_remove,
};
module_platform_driver(gr_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");