gr_udc.c

/*
 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
 *
 * 2013 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRUSBDC USB Device Controller cores available in the
 * GRLIB VHDL IP core library.
 *
 * Full documentation of the GRUSBDC core can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Contributors:
 * - Andreas Larsson <andreas@gaisler.com>
 * - Marko Isomaki
 */

/*
 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints each
 * individually configurable to any of the four USB transfer types. This driver
 * only supports cores in DMA mode.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <asm/byteorder.h>

#include "gr_udc.h"

#define DRIVER_NAME	"gr_udc"
#define DRIVER_DESC	"Aeroflex Gaisler GRUSBDC USB Peripheral Controller"

static const char driver_name[] = DRIVER_NAME;
static const char driver_desc[] = DRIVER_DESC;

#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))
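/* The GRUSBDC core's registers are big-endian, hence the *be accessors above */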
/* USB speed and corresponding string calculated from status register value */
#define GR_SPEED(status) \
	((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))

/* Size of hardware buffer calculated from epctrl register value */
#define GR_BUFFER_SIZE(epctrl) \
	((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
	 GR_EPCTRL_BUFSZ_SCALER)

/* ---------------------------------------------------------------------- */
/* Debug printout functionality */

static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};

static const char *gr_ep0state_string(enum gr_ep0state state)
{
	static const char *const names[] = {
		[GR_EP0_DISCONNECT] = "disconnect",
		[GR_EP0_SETUP] = "setup",
		[GR_EP0_IDATA] = "idata",
		[GR_EP0_ODATA] = "odata",
		[GR_EP0_ISTATUS] = "istatus",
		[GR_EP0_OSTATUS] = "ostatus",
		[GR_EP0_STALL] = "stall",
		[GR_EP0_SUSPEND] = "suspend",
	};

	if (state < 0 || state >= ARRAY_SIZE(names))
		return "UNKNOWN";

	return names[state];
}

#ifdef VERBOSE_DEBUG

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req)
{
	int buflen = ep->is_in ? req->req.length : req->req.actual;
	int rowlen = 32;
	int plen = min(rowlen, buflen);

	dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
		(buflen > plen ? " (truncated)" : ""));
	print_hex_dump_debug(" ", DUMP_PREFIX_NONE,
			     rowlen, 4, req->req.buf, plen, false);
}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length)
{
	dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
		 type, request, value, index, length);
}

#else /* !VERBOSE_DEBUG */

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req) {}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length) {}

#endif /* VERBOSE_DEBUG */
/* ---------------------------------------------------------------------- */
/* Debugfs functionality */

#ifdef CONFIG_USB_GADGET_DEBUG_FS

static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
	u32 epctrl = gr_read32(&ep->regs->epctrl);
	u32 epstat = gr_read32(&ep->regs->epstat);
	int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
	struct gr_request *req;

	seq_printf(seq, "%s:\n", ep->ep.name);
	seq_printf(seq, " mode = %s\n", gr_modestring[mode]);
	seq_printf(seq, " halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
	seq_printf(seq, " disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
	seq_printf(seq, " valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
	seq_printf(seq, " dma_start = %d\n", ep->dma_start);
	seq_printf(seq, " stopped = %d\n", ep->stopped);
	seq_printf(seq, " wedged = %d\n", ep->wedged);
	seq_printf(seq, " callback = %d\n", ep->callback);
	seq_printf(seq, " maxpacket = %d\n", ep->ep.maxpacket);
	seq_printf(seq, " maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
	seq_printf(seq, " bytes_per_buffer = %d\n", ep->bytes_per_buffer);
	if (mode == 1 || mode == 3)
		seq_printf(seq, " nt = %d\n",
			   (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);

	seq_printf(seq, " Buffer 0: %s %s%d\n",
		   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? " " : "selected ",
		   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
	seq_printf(seq, " Buffer 1: %s %s%d\n",
		   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? "selected " : " ",
		   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);

	if (list_empty(&ep->queue)) {
		seq_puts(seq, " Queue: empty\n\n");
		return;
	}

	seq_puts(seq, " Queue:\n");
	list_for_each_entry(req, &ep->queue, queue) {
		struct gr_dma_desc *desc;
		struct gr_dma_desc *next;

		seq_printf(seq, " 0x%p: 0x%p %d %d\n", req,
			   &req->req.buf, req->req.actual, req->req.length);

		next = req->first_desc;
		do {
			desc = next;
			next = desc->next_desc;
			seq_printf(seq, " %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
				   desc == req->curr_desc ? 'c' : ' ',
				   desc, desc->paddr, desc->ctrl, desc->data);
		} while (desc != req->last_desc);
	}
	seq_puts(seq, "\n");
}

static int gr_seq_show(struct seq_file *seq, void *v)
{
	struct gr_udc *dev = seq->private;
	u32 control = gr_read32(&dev->regs->control);
	u32 status = gr_read32(&dev->regs->status);
	struct gr_ep *ep;

	seq_printf(seq, "usb state = %s\n",
		   usb_state_string(dev->gadget.state));
	seq_printf(seq, "address = %d\n",
		   (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
	seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
	seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
	seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
	seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
	seq_printf(seq, "test_mode = %d\n", dev->test_mode);
	seq_puts(seq, "\n");

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_seq_ep_show(seq, ep);

	return 0;
}

static int gr_dfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, gr_seq_show, inode->i_private);
}

static const struct file_operations gr_dfs_fops = {
	.owner		= THIS_MODULE,
	.open		= gr_dfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void gr_dfs_create(struct gr_udc *dev)
{
	const char *name = "gr_udc_state";

	dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
	dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root, dev,
					     &gr_dfs_fops);
}

static void gr_dfs_delete(struct gr_udc *dev)
{
	/* Handles NULL and ERR pointers internally */
	debugfs_remove(dev->dfs_state);
	debugfs_remove(dev->dfs_root);
}

#else /* !CONFIG_USB_GADGET_DEBUG_FS */

static void gr_dfs_create(struct gr_udc *dev) {}
static void gr_dfs_delete(struct gr_udc *dev) {}

#endif /* CONFIG_USB_GADGET_DEBUG_FS */
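	/* Only iso (1) and interrupt (3) modes use the NT (additional transactions) field */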
/* ---------------------------------------------------------------------- */
/* DMA and request handling */

/* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
{
	dma_addr_t paddr;
	struct gr_dma_desc *dma_desc;

	dma_desc = dma_pool_alloc(ep->dev->desc_pool, gfp_flags, &paddr);
	if (!dma_desc) {
		dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
		return NULL;
	}

	memset(dma_desc, 0, sizeof(*dma_desc));
	dma_desc->paddr = paddr;

	return dma_desc;
}

static inline void gr_free_dma_desc(struct gr_udc *dev,
				    struct gr_dma_desc *desc)
{
	dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}

/* Frees the chain of struct gr_dma_desc for the given request */
static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
{
	struct gr_dma_desc *desc;
	struct gr_dma_desc *next;

	next = req->first_desc;
	if (!next)
		return;

	do {
		desc = next;
		next = desc->next_desc;
		gr_free_dma_desc(dev, desc);
	} while (desc != req->last_desc);

	req->first_desc = NULL;
	req->curr_desc = NULL;
	req->last_desc = NULL;
}

static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);

/*
 * Frees allocated resources and calls the appropriate completion function/setup
 * package handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
			      int status)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	struct gr_udc *dev;

	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
	gr_free_dma_desc_chain(dev, req);

	if (ep->is_in) /* For OUT, actual gets updated bit by bit */
		req->req.actual = req->req.length;

	if (!status) {
		if (ep->is_in)
			gr_dbgprint_request("SENT", ep, req);
		else
			gr_dbgprint_request("RECV", ep, req);
	}

	/* Prevent changes to ep->queue during callback */
	ep->callback = 1;
	if (req == dev->ep0reqo && !status) {
		if (req->setup)
			gr_ep0_setup(dev, req);
		else
			dev_err(dev->dev,
				"Unexpected non setup packet on ep0in\n");
	} else if (req->req.complete) {
		spin_unlock(&dev->lock);

		req->req.complete(&ep->ep, &req->req);

		spin_lock(&dev->lock);
	}
	ep->callback = 0;
}

static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct gr_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}
/*
 * Starts DMA for endpoint ep if there are requests in the queue.
 *
 * Must be called with dev->lock held and with !ep->stopped.
 */
static void gr_start_dma(struct gr_ep *ep)
{
	struct gr_request *req;
	u32 dmactrl;

	if (list_empty(&ep->queue)) {
		ep->dma_start = 0;
		return;
	}

	req = list_first_entry(&ep->queue, struct gr_request, queue);

	/* A descriptor should already have been allocated */
	BUG_ON(!req->curr_desc);

	wmb(); /* Make sure all is settled before handing it over to DMA */

	/* Set the descriptor pointer in the hardware */
	gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

	/* Announce available descriptors */
	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

	ep->dma_start = 1;
}

/*
 * Finishes the first request in the ep's queue and, if available, starts the
 * next request in queue.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static void gr_dma_advance(struct gr_ep *ep, int status)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	gr_finish_request(ep, req, status);
	gr_start_dma(ep); /* Regardless of ep->dma_start */
}

/*
 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
 * transfer to be canceled and clears GR_DMACTRL_DA.
 *
 * Must be called with dev->lock held.
 */
static void gr_abort_dma(struct gr_ep *ep)
{
	u32 dmactrl;

	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
}
/*
 * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
 * chain.
 *
 * Size is not used for OUT endpoints. The hardware can not be instructed to
 * handle a smaller buffer than MAXPL in the OUT direction.
 */
static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
			   dma_addr_t data, unsigned size, gfp_t gfp_flags)
{
	struct gr_dma_desc *desc;

	desc = gr_alloc_dma_desc(ep, gfp_flags);
	if (!desc)
		return -ENOMEM;

	desc->data = data;
	if (ep->is_in)
		desc->ctrl =
			(GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
	else
		desc->ctrl = GR_DESC_OUT_CTRL_IE;

	if (!req->first_desc) {
		req->first_desc = desc;
		req->curr_desc = desc;
	} else {
		req->last_desc->next_desc = desc;
		req->last_desc->next = desc->paddr;
		req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
	}
	req->last_desc = desc;

	return 0;
}
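	/*
	 * IN descriptors carry an explicit payload length and are enabled
	 * immediately; OUT descriptors only get the interrupt-enable bit
	 * since the core always fills up to MAXPL (see comment above).
	 */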
/*
 * Sets up a chain of struct gr_dma_desc pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
				  gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left to provide descriptors for */
	u16 bytes_used; /* Bytes accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	while (bytes_left > 0) {
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		/* Should not happen however - gr_queue stops such lengths */
		if (size < ep->bytes_per_buffer)
			dev_warn(ep->dev->dev,
				 "Buffer overrun risk: %u < %u bytes/buffer\n",
				 size, ep->bytes_per_buffer);

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	}

	req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}

/*
 * Sets up a chain of struct gr_dma_desc pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware splits
 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
 * is always set to a multiple of the maximum payload (restricted to the valid
 * number of maximum payloads during high bandwidth isochronous or interrupt
 * transfers)
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one indicating that the entire request has been pushed
 * to hardware.
 */
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
				 gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left in req to provide descriptors for */
	u16 bytes_used; /* Bytes in req accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	do { /* Allow for zero length packets */
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	} while (bytes_left > 0);

	/*
	 * Send an extra zero length packet to indicate that no more data is
	 * available when req->req.zero is set and the data length is even
	 * multiples of ep->ep.maxpacket.
	 */
	if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
		ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
		if (ret)
			goto alloc_err;
	}

	/*
	 * For IN packets we only want to know when the last packet has been
	 * transmitted (not just put into internal buffers).
	 */
	req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}
/* Must be called with dev->lock held */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
	struct gr_udc *dev = ep->dev;
	int ret;

	if (unlikely(!ep->ep.desc && ep->num != 0)) {
		dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
		return -EINVAL;
	}

	if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
		dev_err(dev->dev,
			"Invalid request for %s: buf=%p list_empty=%d\n",
			ep->ep.name, req->req.buf, list_empty(&req->queue));
		return -EINVAL;
	}

	/*
	 * The DMA controller can not handle smaller OUT buffers than
	 * maxpacket. It could lead to buffer overruns if unexpectedly long
	 * packets are received.
	 */
	if (!ep->is_in && (req->req.length % ep->ep.maxpacket) != 0) {
		dev_err(dev->dev,
			"OUT request length %d is not multiple of maxpacket\n",
			req->req.length);
		return -EMSGSIZE;
	}

	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		dev_err(dev->dev, "-ESHUTDOWN");
		return -ESHUTDOWN;
	}

	/* Can't touch registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND) {
		dev_err(dev->dev, "-EBUSY");
		return -EBUSY;
	}

	/* Set up DMA mapping in case the caller didn't */
	ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
	if (ret) {
		dev_err(dev->dev, "usb_gadget_map_request");
		return ret;
	}

	if (ep->is_in)
		ret = gr_setup_in_desc_list(ep, req, gfp_flags);
	else
		ret = gr_setup_out_desc_list(ep, req, gfp_flags);
	if (ret)
		return ret;

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	list_add_tail(&req->queue, &ep->queue);

	/* Start DMA if not started, otherwise interrupt handler handles it */
	if (!ep->dma_start && likely(!ep->stopped))
		gr_start_dma(ep);

	return 0;
}

/*
 * Queue a request from within the driver.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
			       gfp_t gfp_flags)
{
	if (ep->is_in)
		gr_dbgprint_request("RESP", ep, req);

	return gr_queue(ep, req, gfp_flags);
}
/* ---------------------------------------------------------------------- */
/* General helper functions */

/*
 * Dequeue ALL requests.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep_nuke(struct gr_ep *ep)
{
	struct gr_request *req;

	ep->stopped = 1;
	ep->dma_start = 0;
	gr_abort_dma(ep);

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct gr_request, queue);
		gr_finish_request(ep, req, -ESHUTDOWN);
	}
}

/*
 * Reset the hardware state of this endpoint.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep_reset(struct gr_ep *ep)
{
	gr_write32(&ep->regs->epctrl, 0);
	gr_write32(&ep->regs->dmactrl, 0);

	ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
	ep->ep.desc = NULL;
	ep->stopped = 1;
	ep->dma_start = 0;
}

/*
 * Generate STALL on ep0in/out.
 *
 * Must be called with dev->lock held.
 */
static void gr_control_stall(struct gr_udc *dev)
{
	u32 epctrl;

	epctrl = gr_read32(&dev->epo[0].regs->epctrl);
	gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
	epctrl = gr_read32(&dev->epi[0].regs->epctrl);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);

	dev->ep0state = GR_EP0_STALL;
}

/*
 * Halts, halts and wedges, or clears halt for an endpoint.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
{
	u32 epctrl;
	int retval = 0;

	if (ep->num && !ep->ep.desc)
		return -EINVAL;

	if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
		return -EOPNOTSUPP;

	/* Never actually halt ep0, and therefore never clear halt for ep0 */
	if (!ep->num) {
		if (halt && !fromhost) {
			/* ep0 halt from gadget - generate protocol stall */
			gr_control_stall(ep->dev);
			dev_dbg(ep->dev->dev, "EP: stall ep0\n");
			return 0;
		}
		return -EINVAL;
	}

	dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
		(halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);

	epctrl = gr_read32(&ep->regs->epctrl);
	if (halt) {
		/* Set HALT */
		gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
		ep->stopped = 1;
		if (wedge)
			ep->wedged = 1;
	} else {
		gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
		ep->stopped = 0;
		ep->wedged = 0;

		/* Things might have been queued up in the meantime */
		if (!ep->dma_start)
			gr_start_dma(ep);
	}

	return retval;
}

/* Must be called with dev->lock held */
static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
{
	if (dev->ep0state != value)
		dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
			 gr_ep0state_string(value));
	dev->ep0state = value;
}

/*
 * Should only be called when endpoints can not generate interrupts.
 *
 * Must be called with dev->lock held.
 */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
	gr_write32(&dev->regs->control, 0);
	wmb(); /* Make sure that we do not deny one of our interrupts */
	dev->irq_enabled = 0;
}

/*
 * Stop all device activity and disable data line pullup.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_stop_activity(struct gr_udc *dev)
{
	struct gr_ep *ep;

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_ep_nuke(ep);

	gr_disable_interrupts_and_pullup(dev);

	gr_set_ep0state(dev, GR_EP0_DISCONNECT);
	usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
}
/* ---------------------------------------------------------------------- */
/* ep0 setup packet handling */

static void gr_ep0_testmode_complete(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	u32 control;

	ep = container_of(_ep, struct gr_ep, ep);
	dev = ep->dev;

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);
}

static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
{
	/* Nothing needs to be done here */
}

/*
 * Queue a response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
			  void (*complete)(struct usb_ep *ep,
					   struct usb_request *req))
{
	u8 *reqbuf = dev->ep0reqi->req.buf;
	int status;
	int i;

	for (i = 0; i < length; i++)
		reqbuf[i] = buf[i];
	dev->ep0reqi->req.length = length;
	dev->ep0reqi->req.complete = complete;

	status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
	if (status < 0)
		dev_err(dev->dev,
			"Could not queue ep0in setup response: %d\n", status);

	return status;
}

/*
 * Queue a 2 byte response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
{
	__le16 le_response = cpu_to_le16(response);

	return gr_ep0_respond(dev, (u8 *)&le_response, 2,
			      gr_ep0_dummy_complete);
}

/*
 * Queue a ZLP response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_empty(struct gr_udc *dev)
{
	return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
}

/*
 * This is run when a SET_ADDRESS request is received. First writes
 * the new address to the control register which is updated internally
 * when the next IN packet is ACKED.
 *
 * Must be called with dev->lock held.
 */
static void gr_set_address(struct gr_udc *dev, u8 address)
{
	u32 control;

	control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
	control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
	control |= GR_CONTROL_SU;
	gr_write32(&dev->regs->control, control);
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
			     u16 value, u16 index)
{
	u16 response;
	u8 test;

	switch (request) {
	case USB_REQ_SET_ADDRESS:
		dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
		gr_set_address(dev, value & 0xff);
		if (value)
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
		return gr_ep0_respond_empty(dev);

	case USB_REQ_GET_STATUS:
		/* Self powered | remote wakeup */
		response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
		return gr_ep0_respond_u16(dev, response);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Allow remote wakeup */
			dev->remote_wakeup = 1;
			return gr_ep0_respond_empty(dev);

		case USB_DEVICE_TEST_MODE:
			/* The hardware does not support TEST_FORCE_EN */
			test = index >> 8;
			if (test >= TEST_J && test <= TEST_PACKET) {
				dev->test_mode = test;
				return gr_ep0_respond(dev, NULL, 0,
						      gr_ep0_testmode_complete);
			}
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Disallow remote wakeup */
			dev->remote_wakeup = 0;
			return gr_ep0_respond_empty(dev);
		}
		break;
	}

	return 1; /* Delegate the rest */
}
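
/*
 * Completion callback for SET_FEATURE(TEST_MODE) responses: the control
 * register is written from here, after the status stage has gone out,
 * rather than directly from gr_device_request.
 */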
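			/* The test selector is in the high byte of wIndex (USB 2.0, 9.4.9) */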
/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
				u16 value, u16 index)
{
	if (dev->gadget.state != USB_STATE_CONFIGURED)
		return -1;

	/*
	 * Should return STALL for invalid interfaces, but udc driver does not
	 * know anything about that. However, many gadget drivers do not handle
	 * GET_STATUS so we need to take care of that.
	 */

	switch (request) {
	case USB_REQ_GET_STATUS:
		return gr_ep0_respond_u16(dev, 0x0000);

	case USB_REQ_SET_FEATURE:
	case USB_REQ_CLEAR_FEATURE:
		/*
		 * No possible valid standard requests. Still let gadget drivers
		 * have a go at it.
		 */
		break;
	}

	return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index)
{
	struct gr_ep *ep;
	int status;
	int halted;
	u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
	u8 is_in = index & USB_ENDPOINT_DIR_MASK;

	if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
		return -1;

	if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
		return -1;

	ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

	switch (request) {
	case USB_REQ_GET_STATUS:
		halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
		return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			status = gr_ep_halt_wedge(ep, 1, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			if (ep->wedged)
				return -1;
			status = gr_ep_halt_wedge(ep, 0, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;
	}

	return 1; /* Delegate the rest */
}
/* Must be called with dev->lock held */
static void gr_ep0out_requeue(struct gr_udc *dev)
{
	int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);

	if (ret)
		dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
			ret);
}

/*
 * The main function dealing with setup requests on ep0.
 *
 * Must be called with dev->lock held and irqs disabled
 */
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	union {
		struct usb_ctrlrequest ctrl;
		u8 raw[8];
		u32 word[2];
	} u;
	u8 type;
	u8 request;
	u16 value;
	u16 index;
	u16 length;
	int i;
	int status;

	/* Restore from ep0 halt */
	if (dev->ep0state == GR_EP0_STALL) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (!req->req.actual)
			goto out;
	}

	if (dev->ep0state == GR_EP0_ISTATUS) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (req->req.actual > 0)
			dev_dbg(dev->dev,
				"Unexpected setup packet at state %s\n",
				gr_ep0state_string(GR_EP0_ISTATUS));
		else
			goto out; /* Got expected ZLP */
	} else if (dev->ep0state != GR_EP0_SETUP) {
		dev_info(dev->dev,
			 "Unexpected ep0out request at state %s - stalling\n",
			 gr_ep0state_string(dev->ep0state));
		gr_control_stall(dev);
		gr_set_ep0state(dev, GR_EP0_SETUP);
		goto out;
	} else if (!req->req.actual) {
		dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
			gr_ep0state_string(dev->ep0state));
		goto out;
	}

	/* Handle SETUP packet */
	for (i = 0; i < req->req.actual; i++)
		u.raw[i] = ((u8 *)req->req.buf)[i];

	type = u.ctrl.bRequestType;
	request = u.ctrl.bRequest;
	value = le16_to_cpu(u.ctrl.wValue);
	index = le16_to_cpu(u.ctrl.wIndex);
	length = le16_to_cpu(u.ctrl.wLength);

	gr_dbgprint_devreq(dev, type, request, value, index, length);

	/* Check for data stage */
	if (length) {
		if (type & USB_DIR_IN)
			gr_set_ep0state(dev, GR_EP0_IDATA);
		else
			gr_set_ep0state(dev, GR_EP0_ODATA);
	}

	status = 1; /* Positive status flags delegation */
	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (type & USB_RECIP_MASK) {
		case USB_RECIP_DEVICE:
			status = gr_device_request(dev, type, request,
						   value, index);
			break;
		case USB_RECIP_ENDPOINT:
			status = gr_endpoint_request(dev, type, request,
						     value, index);
			break;
		case USB_RECIP_INTERFACE:
			status = gr_interface_request(dev, type, request,
						      value, index);
			break;
		}
	}

	if (status > 0) {
		spin_unlock(&dev->lock);

		dev_vdbg(dev->dev, "DELEGATE\n");
		status = dev->driver->setup(&dev->gadget, &u.ctrl);

		spin_lock(&dev->lock);
	}

	/* Generate STALL on both ep0out and ep0in if requested */
	if (unlikely(status < 0)) {
		dev_vdbg(dev->dev, "STALL\n");
		gr_control_stall(dev);
	}

	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
	    request == USB_REQ_SET_CONFIGURATION) {
		if (!value) {
			dev_dbg(dev->dev, "STATUS: deconfigured\n");
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		} else if (status >= 0) {
			/* Not configured unless gadget OK:s it */
			dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
			usb_gadget_set_state(&dev->gadget,
					     USB_STATE_CONFIGURED);
		}
	}

	/* Get ready for next stage */
	if (dev->ep0state == GR_EP0_ODATA)
		gr_set_ep0state(dev, GR_EP0_OSTATUS);
	else if (dev->ep0state == GR_EP0_IDATA)
		gr_set_ep0state(dev, GR_EP0_ISTATUS);
	else
		gr_set_ep0state(dev, GR_EP0_SETUP);

out:
	gr_ep0out_requeue(dev);
}
/* ---------------------------------------------------------------------- */
/* VBUS and USB reset handling */

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_connected(struct gr_udc *dev, u32 status)
{
	u32 control;

	dev->gadget.speed = GR_SPEED(status);
	usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);

	/* Turn on full interrupts and pullup */
	control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
		   GR_CONTROL_SP | GR_CONTROL_EP);
	gr_write32(&dev->regs->control, control);
}

/* Must be called with dev->lock held */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
	u32 status;

	dev->irq_enabled = 1;
	wmb(); /* Make sure we do not ignore an interrupt */
	gr_write32(&dev->regs->control, GR_CONTROL_VI);

	/* Take care of the case we are already plugged in at this point */
	status = gr_read32(&dev->regs->status);
	if (status & GR_STATUS_VB)
		gr_vbus_connected(dev, status);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
	gr_stop_activity(dev);

	/* Report disconnect */
	if (dev->driver && dev->driver->disconnect) {
		spin_unlock(&dev->lock);

		dev->driver->disconnect(&dev->gadget);

		spin_lock(&dev->lock);
	}

	gr_enable_vbus_detect(dev);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
	gr_set_address(dev, 0);
	gr_set_ep0state(dev, GR_EP0_SETUP);
	usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
	dev->gadget.speed = GR_SPEED(status);

	gr_ep_nuke(&dev->epo[0]);
	gr_ep_nuke(&dev->epi[0]);
	dev->epo[0].stopped = 0;
	dev->epi[0].stopped = 0;
	gr_ep0out_requeue(dev);
}
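	/* ep0 must be operational right after reset to catch the next SETUP */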
/* ---------------------------------------------------------------------- */
/* Irq handling */

/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->last_desc)
		return 0;

	if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
		return 0; /* Not put in hardware buffers yet */

	if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
		return 0; /* Not transmitted yet, still in hardware buffers */

	/* Write complete */
	gr_dma_advance(ep, 0);

	return 1;
}

/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
	u32 ep_dmactrl;
	u32 ctrl;
	u16 len;
	struct gr_request *req;
	struct gr_udc *dev = ep->dev;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->curr_desc)
		return 0;

	ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
	if (ctrl & GR_DESC_OUT_CTRL_EN)
		return 0; /* Not received yet */

	/* Read complete */
	len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
	req->req.actual += len;
	if (ctrl & GR_DESC_OUT_CTRL_SE)
		req->setup = 1;

	if (len < ep->ep.maxpacket || req->req.actual == req->req.length) {
		/* Short packet or the expected size - we are done */

		if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
			/*
			 * Send a status stage ZLP to ack the DATA stage in the
			 * OUT direction. This needs to be done before
			 * gr_dma_advance as that can lead to a call to
			 * ep0_setup that can change dev->ep0state.
			 */
			gr_ep0_respond_empty(dev);
			gr_set_ep0state(dev, GR_EP0_SETUP);
		}

		gr_dma_advance(ep, 0);
	} else {
		/* Not done yet. Enable the next descriptor to receive more. */
		req->curr_desc = req->curr_desc->next_desc;
		req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

		ep_dmactrl = gr_read32(&ep->regs->dmactrl);
		gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
	}

	return 1;
}
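	/* The DMA engine writes the ctrl word back; ACCESS_ONCE forces a fresh read */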
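	/* A set SE bit in the written-back ctrl word marks a received SETUP packet */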
/*
 * Handle state changes. Returns whether something was handled.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
	u32 status = gr_read32(&dev->regs->status);
	int handled = 0;
	int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
			 dev->gadget.state == USB_STATE_ATTACHED);

	/* VBUS valid detected */
	if (!powstate && (status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
		gr_vbus_connected(dev, status);
		handled = 1;
	}

	/* Disconnect */
	if (powstate && !(status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
		gr_vbus_disconnected(dev);
		handled = 1;
	}

	/* USB reset detected */
	if (status & GR_STATUS_UR) {
		dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
			GR_SPEED_STR(status));
		gr_write32(&dev->regs->status, GR_STATUS_UR);
		gr_udc_usbreset(dev, status);
		handled = 1;
	}

	/* Speed change */
	if (dev->gadget.speed != GR_SPEED(status)) {
		dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
			GR_SPEED_STR(status));
		dev->gadget.speed = GR_SPEED(status);
		handled = 1;
	}

	/* Going into suspend */
	if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB suspend\n");
		gr_set_ep0state(dev, GR_EP0_SUSPEND);
		dev->suspended_from = dev->gadget.state;
		usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->suspend) {
			spin_unlock(&dev->lock);

			dev->driver->suspend(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	/* Coming out of suspend */
	if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB resume\n");
		if (dev->suspended_from == USB_STATE_POWERED)
			gr_set_ep0state(dev, GR_EP0_DISCONNECT);
		else
			gr_set_ep0state(dev, GR_EP0_SETUP);
		usb_gadget_set_state(&dev->gadget, dev->suspended_from);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->resume) {
			spin_unlock(&dev->lock);

			dev->driver->resume(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	return handled;
}
/* Non-interrupt context irq handler */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;
	struct gr_ep *ep;
	int handled = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	if (!dev->irq_enabled)
		goto out;

	/*
	 * Check IN ep interrupts. We check these before the OUT eps because
	 * some gadgets reuse the request that might already be currently
	 * outstanding and needs to be completed (mainly setup requests).
	 */
	for (i = 0; i < dev->nepi; i++) {
		ep = &dev->epi[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_in_ep(ep) || handled;
	}

	/* Check OUT ep interrupts */
	for (i = 0; i < dev->nepo; i++) {
		ep = &dev->epo[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_out_ep(ep) || handled;
	}

	/* Check status interrupts */
	handled = gr_handle_state_changes(dev) || handled;

	/*
	 * Check AMBA DMA errors. Only check if we didn't find anything else to
	 * handle because this shouldn't happen if we did everything right.
	 */
	if (!handled) {
		list_for_each_entry(ep, &dev->ep_list, ep_list) {
			if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
				dev_err(dev->dev,
					"AMBA Error occurred for %s\n",
					ep->ep.name);
				handled = 1;
			}
		}
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

/* Interrupt context irq handler */
static irqreturn_t gr_irq(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;

	if (!dev->irq_enabled)
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}
  1219. /* ---------------------------------------------------------------------- */
  1220. /* USB ep ops */
  1221. /* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
  1222. static int gr_ep_enable(struct usb_ep *_ep,
  1223. const struct usb_endpoint_descriptor *desc)
  1224. {
  1225. struct gr_udc *dev;
  1226. struct gr_ep *ep;
  1227. u8 mode;
  1228. u8 nt;
  1229. u16 max;
  1230. u16 buffer_size = 0;
  1231. u32 epctrl;
  1232. ep = container_of(_ep, struct gr_ep, ep);
  1233. if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
  1234. return -EINVAL;
  1235. dev = ep->dev;
  1236. /* 'ep0' IN and OUT are reserved */
  1237. if (ep == &dev->epo[0] || ep == &dev->epi[0])
  1238. return -EINVAL;
  1239. if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
  1240. return -ESHUTDOWN;
  1241. /* Make sure we are clear for enabling */
  1242. epctrl = gr_read32(&ep->regs->epctrl);
  1243. if (epctrl & GR_EPCTRL_EV)
  1244. return -EBUSY;
  1245. /* Check that directions match */
  1246. if (!ep->is_in != !usb_endpoint_dir_in(desc))
  1247. return -EINVAL;
  1248. /* Check ep num */
  1249. if ((!ep->is_in && ep->num >= dev->nepo) ||
  1250. (ep->is_in && ep->num >= dev->nepi))
  1251. return -EINVAL;
  1252. if (usb_endpoint_xfer_control(desc)) {
  1253. mode = 0;
  1254. } else if (usb_endpoint_xfer_isoc(desc)) {
  1255. mode = 1;
  1256. } else if (usb_endpoint_xfer_bulk(desc)) {
  1257. mode = 2;
  1258. } else if (usb_endpoint_xfer_int(desc)) {
  1259. mode = 3;
  1260. } else {
  1261. dev_err(dev->dev, "Unknown transfer type for %s\n",
  1262. ep->ep.name);
  1263. return -EINVAL;
  1264. }
  1265. /*
  1266. * Bits 10-0 set the max payload. 12-11 set the number of
  1267. * additional transactions.
  1268. */
  1269. max = 0x7ff & usb_endpoint_maxp(desc);
  1270. nt = 0x3 & (usb_endpoint_maxp(desc) >> 11);
	buffer_size = GR_BUFFER_SIZE(epctrl);
	if (nt && (mode == 0 || mode == 2)) {
		dev_err(dev->dev,
			"%s mode: multiple trans./microframe not valid\n",
			(mode == 2 ? "Bulk" : "Control"));
		return -EINVAL;
	} else if (nt == 0x3) {
		dev_err(dev->dev,
			"Invalid value 0x3 for additional trans./microframe\n");
		return -EINVAL;
	} else if ((nt + 1) * max > buffer_size) {
		dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
			buffer_size, (nt + 1), max);
		return -EINVAL;
	} else if (max == 0) {
		dev_err(dev->dev, "Max payload cannot be set to 0\n");
		return -EINVAL;
	} else if (max > ep->ep.maxpacket_limit) {
		dev_err(dev->dev, "Requested max payload %d > limit %d\n",
			max, ep->ep.maxpacket_limit);
		return -EINVAL;
	}

	spin_lock(&ep->dev->lock);

	if (!ep->stopped) {
		spin_unlock(&ep->dev->lock);
		return -EBUSY;
	}

	ep->stopped = 0;
	ep->wedged = 0;
	ep->ep.desc = desc;
	ep->ep.maxpacket = max;
	ep->dma_start = 0;

	if (nt) {
		/*
		 * Maximum possible size of all payloads in one microframe
		 * regardless of direction when using high-bandwidth mode.
		 */
		ep->bytes_per_buffer = (nt + 1) * max;
	} else if (ep->is_in) {
		/*
		 * The biggest multiple of maximum packet size that fits into
		 * the buffer. The hardware will split up into many packets in
		 * the IN direction.
		 */
		ep->bytes_per_buffer = (buffer_size / max) * max;
	} else {
		/*
		 * Only single packets will be placed in the buffers in the
		 * OUT direction.
		 */
		ep->bytes_per_buffer = max;
	}

	epctrl = (max << GR_EPCTRL_MAXPL_POS)
		| (nt << GR_EPCTRL_NT_POS)
		| (mode << GR_EPCTRL_TT_POS)
		| GR_EPCTRL_EV;
	if (ep->is_in)
		epctrl |= GR_EPCTRL_PI;
	gr_write32(&ep->regs->epctrl, epctrl);

	gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);

	spin_unlock(&ep->dev->lock);

	dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
		ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
	return 0;
}
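/*
 * Sketch of how a gadget function driver typically ends up here (the
 * descriptor name is hypothetical; the binding of ep->desc normally happens
 * through config_ep_by_speed()):
 *
 *	ep = usb_ep_autoconfig(gadget, &bulk_in_desc);
 *	ep->desc = &bulk_in_desc;
 *	ret = usb_ep_enable(ep);	-- dispatches to gr_ep_ops.enable
 */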
/* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_disable(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !ep->ep.desc)
		return -ENODEV;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);

	spin_lock_irqsave(&dev->lock, flags);

	gr_ep_nuke(ep);
	gr_ep_reset(ep);
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*
 * Frees a request, but not any DMA buffers associated with it
 * (gr_finish_request should already have taken care of that).
 */
static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;

	if (!_ep || !_req)
		return;
	req = container_of(_req, struct gr_request, req);

	/* Leads to memory leak */
	WARN(!list_empty(&req->queue),
	     "request not dequeued properly before freeing\n");

	kfree(req);
}

/* Queue a request from the gadget */
static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct gr_udc *dev;
	int ret;

	if (unlikely(!_ep || !_req))
		return -EINVAL;

	ep = container_of(_ep, struct gr_ep, ep);
	req = container_of(_req, struct gr_request, req);
	dev = ep->dev;

	spin_lock(&ep->dev->lock);

	/*
	 * The ep0 pointer in the gadget struct is used both for ep0in and
	 * ep0out. In a data stage in the out direction ep0out needs to be used
	 * instead of the default ep0in. Completion functions might use
	 * driver_data, so that needs to be copied as well.
	 */
	if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
		ep = &dev->epo[0];
		ep->ep.driver_data = dev->epi[0].ep.driver_data;
	}

	if (ep->is_in)
		gr_dbgprint_request("EXTERN", ep, req);
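	/*
	 * dev->lock is held here, so the allocation must not sleep:
	 * GFP_ATOMIC is used instead of the caller's gfp_flags.
	 */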
	ret = gr_queue(ep, req, GFP_ATOMIC);

	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Dequeue JUST ONE request */
static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;
	struct gr_ep *ep;
	struct gr_udc *dev;
	int ret = 0;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* We can't touch (DMA) registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	spin_lock_irqsave(&dev->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
		/* This request is currently being processed */
		gr_abort_dma(ep);
		if (ep->stopped)
			gr_finish_request(ep, req, -ECONNRESET);
		else
			gr_dma_advance(ep, -ECONNRESET);
	} else if (!list_empty(&req->queue)) {
		/* Not being processed - gr_finish_request dequeues it */
		gr_finish_request(ep, req, -ECONNRESET);
	} else {
		ret = -EOPNOTSUPP;
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}

/* Helper for gr_set_halt and gr_set_wedge */
static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	int ret;
	struct gr_ep *ep;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	spin_lock(&ep->dev->lock);

	/* Halting an IN endpoint should fail if queue is not empty */
	if (halt && ep->is_in && !list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = gr_ep_halt_wedge(ep, halt, wedge, 0);

out:
	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Halt endpoint */
static int gr_set_halt(struct usb_ep *_ep, int halt)
{
	return gr_set_halt_wedge(_ep, halt, 0);
}

/* Halt and wedge endpoint */
static int gr_set_wedge(struct usb_ep *_ep)
{
	return gr_set_halt_wedge(_ep, 1, 1);
}
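/*
 * These are reached through the standard usb_ep API: usb_ep_set_halt()
 * dispatches to gr_set_halt() and usb_ep_set_wedge() to gr_set_wedge(). A
 * wedged endpoint stays halted even when the host sends
 * ClearFeature(ENDPOINT_HALT); only the gadget driver can clear the wedge.
 */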
/*
 * Return the total number of bytes currently stored in the internal buffers of
 * the endpoint.
 */
static int gr_fifo_status(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epstat;
	u32 bytes = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);
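	/*
	 * The core double-buffers each endpoint: add up the byte counts of
	 * both hardware buffers (B0 and B1) when they hold data.
	 */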
	epstat = gr_read32(&ep->regs->epstat);

	if (epstat & GR_EPSTAT_B0)
		bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
	if (epstat & GR_EPSTAT_B1)
		bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;

	return bytes;
}

/* Empty data from internal buffers of an endpoint. */
static void gr_fifo_flush(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epctrl;

	if (!_ep)
		return;
	ep = container_of(_ep, struct gr_ep, ep);
	dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);

	spin_lock(&ep->dev->lock);

	epctrl = gr_read32(&ep->regs->epctrl);
	epctrl |= GR_EPCTRL_CB;
	gr_write32(&ep->regs->epctrl, epctrl);

	spin_unlock(&ep->dev->lock);
}

static const struct usb_ep_ops gr_ep_ops = {
	.enable		= gr_ep_enable,
	.disable	= gr_ep_disable,

	.alloc_request	= gr_alloc_request,
	.free_request	= gr_free_request,

	.queue		= gr_queue_ext,
	.dequeue	= gr_dequeue,

	.set_halt	= gr_set_halt,
	.set_wedge	= gr_set_wedge,
	.fifo_status	= gr_fifo_status,
	.fifo_flush	= gr_fifo_flush,
};

/* ---------------------------------------------------------------------- */
/* USB Gadget ops */
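/* Return the current USB frame number as reported in the status register */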
static int gr_get_frame(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);
	return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
}
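/* Initiate remote wakeup signalling towards the host */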
static int gr_wakeup(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	/* Remote wakeup feature not enabled by host */
	if (!dev->remote_wakeup)
		return -EINVAL;

	spin_lock(&dev->lock);

	gr_write32(&dev->regs->control,
		   gr_read32(&dev->regs->control) | GR_CONTROL_RW);

	spin_unlock(&dev->lock);

	return 0;
}
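/*
 * Soft-connect/disconnect: the gadget core calls this to connect the device
 * to or disconnect it from the bus by toggling the D+ pullup.
 */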
static int gr_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct gr_udc *dev;
	u32 control;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	if (is_on)
		control |= GR_CONTROL_EP;
	else
		control &= ~GR_CONTROL_EP;
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);

	return 0;
}
static int gr_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);

	spin_lock(&dev->lock);

	/* Hook up the driver */
	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Get ready for host detection */
	gr_enable_vbus_detect(dev);

	spin_unlock(&dev->lock);

	dev_info(dev->dev, "Started with gadget driver '%s'\n",
		 driver->driver.name);

	return 0;
}

static int gr_udc_stop(struct usb_gadget *gadget,
		       struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->driver = NULL;
	gr_stop_activity(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	dev_info(dev->dev, "Stopped\n");

	return 0;
}

static const struct usb_gadget_ops gr_ops = {
	.get_frame	= gr_get_frame,
	.wakeup		= gr_wakeup,
	.pullup		= gr_pullup,
	.udc_start	= gr_udc_start,
	.udc_stop	= gr_udc_stop,
	/* Other operations not supported */
};
/* ---------------------------------------------------------------------- */
/* Module probe, removal and OF matching */

static const char * const onames[] = {
	"ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
	"ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
	"ep12out", "ep13out", "ep14out", "ep15out"
};

static const char * const inames[] = {
	"ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
	"ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
	"ep12in", "ep13in", "ep14in", "ep15in"
};

/* Must be called with dev->lock held */
static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct usb_request *_req;
	void *buf;

	if (is_in) {
		ep = &dev->epi[num];
		ep->ep.name = inames[num];
		ep->regs = &dev->regs->epi[num];
	} else {
		ep = &dev->epo[num];
		ep->ep.name = onames[num];
		ep->regs = &dev->regs->epo[num];
	}

	gr_ep_reset(ep);
	ep->num = num;
	ep->is_in = is_in;
	ep->dev = dev;
	ep->ep.ops = &gr_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	if (num == 0) {
		_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
		buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
		if (!_req || !buf) {
			/* possible _req freed by gr_probe via gr_remove */
			return -ENOMEM;
		}

		req = container_of(_req, struct gr_request, req);
		req->req.buf = buf;
		req->req.length = MAX_CTRL_PL_SIZE;

		if (is_in)
			dev->ep0reqi = req; /* Complete gets set as used */
		else
			dev->ep0reqo = req; /* Completion treated separately */

		usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
		ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;
	} else {
		usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
	}
	list_add_tail(&ep->ep_list, &dev->ep_list);

	return 0;
}
/* Must be called with dev->lock held */
static int gr_udc_init(struct gr_udc *dev)
{
	struct device_node *np = dev->dev->of_node;
	u32 epctrl_val;
	u32 dmactrl_val;
	int i;
	int ret = 0;
	u32 bufsize;

	gr_set_address(dev, 0);

	INIT_LIST_HEAD(&dev->gadget.ep_list);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->gadget.ep0 = &dev->epi[0].ep;

	INIT_LIST_HEAD(&dev->ep_list);
	gr_set_ep0state(dev, GR_EP0_DISCONNECT);
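	/*
	 * Per-endpoint buffer sizes come from the optional "epobufsizes" and
	 * "epibufsizes" device tree properties; endpoints without an entry
	 * fall back to 1024 bytes. A sketch of such a fragment:
	 *
	 *	epobufsizes = <1024 1024 512>;
	 *	epibufsizes = <1024 1024 512>;
	 */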
	for (i = 0; i < dev->nepo; i++) {
		if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 0, bufsize);
		if (ret)
			return ret;
	}

	for (i = 0; i < dev->nepi; i++) {
		if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 1, bufsize);
		if (ret)
			return ret;
	}

	/* Must be disabled by default */
	dev->remote_wakeup = 0;

	/* Enable ep0out and ep0in */
	epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
	dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
	gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
	gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
	gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);

	return 0;
}
static int gr_remove(struct platform_device *pdev)
{
	struct gr_udc *dev = platform_get_drvdata(pdev);

	if (dev->added)
		usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
	if (dev->driver)
		return -EBUSY;

	gr_dfs_delete(dev);
	if (dev->desc_pool)
		dma_pool_destroy(dev->desc_pool);
	platform_set_drvdata(pdev, NULL);

	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
	gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);

	return 0;
}
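/*
 * The irq is requested as a threaded irq: gr_irq() runs as the quick check in
 * hard interrupt context, while gr_irq_handler() does the actual event
 * handling in a kernel thread (the usual request_threaded_irq() split).
 */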
static int gr_request_irq(struct gr_udc *dev, int irq)
{
	return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
					 IRQF_SHARED, driver_name, dev);
}
static int gr_probe(struct platform_device *pdev)
{
	struct gr_udc *dev;
	struct resource *res;
	struct gr_regs __iomem *regs;
	int retval;
	u32 status;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq <= 0) {
		dev_err(dev->dev, "No irq found\n");
		return -ENODEV;
	}

	/* Some core configurations have separate irqs for IN and OUT events */
	dev->irqi = platform_get_irq(pdev, 1);
	if (dev->irqi > 0) {
		dev->irqo = platform_get_irq(pdev, 2);
		if (dev->irqo <= 0) {
			dev_err(dev->dev, "Found irqi but not irqo\n");
			return -ENODEV;
		}
	} else {
		dev->irqi = 0;
	}

	dev->gadget.name = driver_name;
	dev->gadget.max_speed = USB_SPEED_HIGH;
	dev->gadget.ops = &gr_ops;
	dev->gadget.quirk_ep_out_aligned_size = true;

	spin_lock_init(&dev->lock);
	dev->regs = regs;

	platform_set_drvdata(pdev, dev);

	/* Determine number of endpoints and data interface mode */
	status = gr_read32(&dev->regs->status);
	dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
	dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;

	if (!(status & GR_STATUS_DM)) {
		dev_err(dev->dev, "Slave mode cores are not supported\n");
		return -ENODEV;
	}

	/* --- Effects of the following calls might need explicit cleanup --- */

	/* Create DMA pool for descriptors */
	dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
					 sizeof(struct gr_dma_desc), 4, 0);
	if (!dev->desc_pool) {
		dev_err(dev->dev, "Could not allocate DMA pool\n");
		return -ENOMEM;
	}

	spin_lock(&dev->lock);

	/* Inside lock so that no gadget can use this udc until probe is done */
	retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
	if (retval) {
		dev_err(dev->dev, "Could not add gadget udc\n");
		goto out;
	}
	dev->added = 1;

	retval = gr_udc_init(dev);
	if (retval)
		goto out;

	gr_dfs_create(dev);

	/* Clear all interrupt enables that might be left on since last boot */
	gr_disable_interrupts_and_pullup(dev);

	retval = gr_request_irq(dev, dev->irq);
	if (retval) {
		dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
		goto out;
	}

	if (dev->irqi) {
		retval = gr_request_irq(dev, dev->irqi);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqi %d\n",
				dev->irqi);
			goto out;
		}
		retval = gr_request_irq(dev, dev->irqo);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqo %d\n",
				dev->irqo);
			goto out;
		}
	}

	if (dev->irqi)
		dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
			 dev->irq, dev->irqi, dev->irqo);
	else
		dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);

out:
	spin_unlock(&dev->lock);

	if (retval)
		gr_remove(pdev);

	return retval;
}
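/*
 * GRLIB AMBA plug&play devices are matched on the core name rather than on a
 * "compatible" string, hence the .name entries below.
 */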
static const struct of_device_id gr_match[] = {
	{.name = "GAISLER_USBDC"},
	{.name = "01_021"},
	{},
};
MODULE_DEVICE_TABLE(of, gr_match);

static struct platform_driver gr_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = gr_match,
	},
	.probe = gr_probe,
	.remove = gr_remove,
};
module_platform_driver(gr_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");