/*
 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
 *
 * 2013 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRUSBDC USB Device Controller cores available in the
 * GRLIB VHDL IP core library.
 *
 * Full documentation of the GRUSBDC core can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Contributors:
 * - Andreas Larsson <andreas@gaisler.com>
 * - Marko Isomaki
 */

/*
 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints each
 * individually configurable to any of the four USB transfer types. This driver
 * only supports cores in DMA mode.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <asm/byteorder.h>

#include "gr_udc.h"

#define DRIVER_NAME "gr_udc"
#define DRIVER_DESC "Aeroflex Gaisler GRUSBDC USB Peripheral Controller"

static const char driver_name[] = DRIVER_NAME;
static const char driver_desc[] = DRIVER_DESC;

#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))
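
/*
 * Note: the accessors above use ioread32be/iowrite32be because the GRUSBDC
 * register file is big-endian (GRLIB cores typically sit behind a big-endian
 * AMBA bus, e.g. on LEON/SPARC systems). Illustrative use, taken from the
 * code below:
 *
 *	u32 status = gr_read32(&dev->regs->status);
 */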

/* USB speed and corresponding string calculated from status register value */
#define GR_SPEED(status) \
	((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))

/* Size of hardware buffer calculated from epctrl register value */
#define GR_BUFFER_SIZE(epctrl) \
	((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
	 GR_EPCTRL_BUFSZ_SCALER)
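
/*
 * Worked example (symbolic, since the field constants live in gr_udc.h): if
 * the BUFSZ field extracted from epctrl holds the value n, the hardware
 * buffer is n * GR_EPCTRL_BUFSZ_SCALER bytes. gr_ep_enable() below checks
 * that (nt + 1) * max payload fits within this size.
 */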

/* ---------------------------------------------------------------------- */
/* Debug printout functionality */

static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};

static const char *gr_ep0state_string(enum gr_ep0state state)
{
	static const char *const names[] = {
		[GR_EP0_DISCONNECT] = "disconnect",
		[GR_EP0_SETUP] = "setup",
		[GR_EP0_IDATA] = "idata",
		[GR_EP0_ODATA] = "odata",
		[GR_EP0_ISTATUS] = "istatus",
		[GR_EP0_OSTATUS] = "ostatus",
		[GR_EP0_STALL] = "stall",
		[GR_EP0_SUSPEND] = "suspend",
	};

	if (state < 0 || state >= ARRAY_SIZE(names))
		return "UNKNOWN";

	return names[state];
}

#ifdef VERBOSE_DEBUG

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req)
{
	int buflen = ep->is_in ? req->req.length : req->req.actual;
	int rowlen = 32;
	int plen = min(rowlen, buflen);

	dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
		(buflen > plen ? " (truncated)" : ""));
	print_hex_dump_debug(" ", DUMP_PREFIX_NONE,
			     rowlen, 4, req->req.buf, plen, false);
}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length)
{
	dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
		 type, request, value, index, length);
}

#else /* !VERBOSE_DEBUG */

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req) {}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length) {}

#endif /* VERBOSE_DEBUG */

/* ---------------------------------------------------------------------- */
/* Debugfs functionality */

#ifdef CONFIG_USB_GADGET_DEBUG_FS

static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
	u32 epctrl = gr_read32(&ep->regs->epctrl);
	u32 epstat = gr_read32(&ep->regs->epstat);
	int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
	struct gr_request *req;

	seq_printf(seq, "%s:\n", ep->ep.name);
	seq_printf(seq, " mode = %s\n", gr_modestring[mode]);
	seq_printf(seq, " halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
	seq_printf(seq, " disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
	seq_printf(seq, " valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
	seq_printf(seq, " dma_start = %d\n", ep->dma_start);
	seq_printf(seq, " stopped = %d\n", ep->stopped);
	seq_printf(seq, " wedged = %d\n", ep->wedged);
	seq_printf(seq, " callback = %d\n", ep->callback);
	seq_printf(seq, " maxpacket = %d\n", ep->ep.maxpacket);
	seq_printf(seq, " bytes_per_buffer = %d\n", ep->bytes_per_buffer);
	if (mode == 1 || mode == 3)
		seq_printf(seq, " nt = %d\n",
			   (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);
	seq_printf(seq, " Buffer 0: %s %s%d\n",
		   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? " " : "selected ",
		   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
	seq_printf(seq, " Buffer 1: %s %s%d\n",
		   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? "selected " : " ",
		   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);

	if (list_empty(&ep->queue)) {
		seq_puts(seq, " Queue: empty\n\n");
		return;
	}

	seq_puts(seq, " Queue:\n");
	list_for_each_entry(req, &ep->queue, queue) {
		struct gr_dma_desc *desc;
		struct gr_dma_desc *next;

		seq_printf(seq, " 0x%p: 0x%p %d %d\n", req,
			   &req->req.buf, req->req.actual, req->req.length);

		next = req->first_desc;
		do {
			desc = next;
			next = desc->next_desc;
			seq_printf(seq, " %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
				   desc == req->curr_desc ? 'c' : ' ',
				   desc, desc->paddr, desc->ctrl, desc->data);
		} while (desc != req->last_desc);
	}

	seq_puts(seq, "\n");
}

static int gr_seq_show(struct seq_file *seq, void *v)
{
	struct gr_udc *dev = seq->private;
	u32 control = gr_read32(&dev->regs->control);
	u32 status = gr_read32(&dev->regs->status);
	struct gr_ep *ep;

	seq_printf(seq, "usb state = %s\n",
		   usb_state_string(dev->gadget.state));
	seq_printf(seq, "address = %d\n",
		   (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
	seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
	seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
	seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
	seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
	seq_printf(seq, "test_mode = %d\n", dev->test_mode);
	seq_puts(seq, "\n");

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_seq_ep_show(seq, ep);

	return 0;
}

static int gr_dfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, gr_seq_show, inode->i_private);
}

static const struct file_operations gr_dfs_fops = {
	.owner		= THIS_MODULE,
	.open		= gr_dfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void gr_dfs_create(struct gr_udc *dev)
{
	const char *name = "gr_udc_state";

	dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
	if (IS_ERR(dev->dfs_root)) {
		dev_err(dev->dev, "Failed to create debugfs directory\n");
		return;
	}
	dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root,
					     dev, &gr_dfs_fops);
	if (IS_ERR(dev->dfs_state))
		dev_err(dev->dev, "Failed to create debugfs file %s\n", name);
}

static void gr_dfs_delete(struct gr_udc *dev)
{
	/* Handles NULL and ERR pointers internally */
	debugfs_remove(dev->dfs_state);
	debugfs_remove(dev->dfs_root);
}

#else /* !CONFIG_USB_GADGET_DEBUG_FS */

static void gr_dfs_create(struct gr_udc *dev) {}
static void gr_dfs_delete(struct gr_udc *dev) {}

#endif /* CONFIG_USB_GADGET_DEBUG_FS */
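
/*
 * With CONFIG_USB_GADGET_DEBUG_FS enabled, the state file created above is
 * normally reachable at /sys/kernel/debug/<device-name>/gr_udc_state
 * (assuming debugfs is mounted at its conventional location).
 */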

/* ---------------------------------------------------------------------- */
/* DMA and request handling */

/* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
{
	dma_addr_t paddr;
	struct gr_dma_desc *dma_desc;

	dma_desc = dma_pool_alloc(ep->dev->desc_pool, gfp_flags, &paddr);
	if (!dma_desc) {
		dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
		return NULL;
	}

	memset(dma_desc, 0, sizeof(*dma_desc));
	dma_desc->paddr = paddr;

	return dma_desc;
}

static inline void gr_free_dma_desc(struct gr_udc *dev,
				    struct gr_dma_desc *desc)
{
	dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}

/* Frees the chain of struct gr_dma_desc for the given request */
static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
{
	struct gr_dma_desc *desc;
	struct gr_dma_desc *next;

	next = req->first_desc;
	if (!next)
		return;

	do {
		desc = next;
		next = desc->next_desc;
		gr_free_dma_desc(dev, desc);
	} while (desc != req->last_desc);

	req->first_desc = NULL;
	req->curr_desc = NULL;
	req->last_desc = NULL;
}
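
/*
 * Note: the do/while above frees the current descriptor before testing the
 * loop condition, so a single-descriptor chain (first_desc == last_desc) is
 * handled without a special case.
 */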

static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);

/*
 * Frees allocated resources and calls the appropriate completion function/setup
 * package handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
			      int status)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	struct gr_udc *dev;

	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
	gr_free_dma_desc_chain(dev, req);

	if (ep->is_in) /* For OUT, actual gets updated bit by bit */
		req->req.actual = req->req.length;

	if (!status) {
		if (ep->is_in)
			gr_dbgprint_request("SENT", ep, req);
		else
			gr_dbgprint_request("RECV", ep, req);
	}

	/* Prevent changes to ep->queue during callback */
	ep->callback = 1;
	if (req == dev->ep0reqo && !status) {
		if (req->setup)
			gr_ep0_setup(dev, req);
		else
			dev_err(dev->dev,
				"Unexpected non setup packet on ep0in\n");
	} else if (req->req.complete) {
		spin_unlock(&dev->lock);

		req->req.complete(&ep->ep, &req->req);

		spin_lock(&dev->lock);
	}
	ep->callback = 0;
}

static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct gr_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}
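
/*
 * gr_alloc_request() is reached through the usb_ep_ops table; a gadget driver
 * never calls it directly. Minimal sketch of the calling convention (the
 * endpoint variable here is hypothetical):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	...
 *	usb_ep_free_request(ep, req);
 */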

/*
 * Starts DMA for endpoint ep if there are requests in the queue.
 *
 * Must be called with dev->lock held and with !ep->stopped.
 */
static void gr_start_dma(struct gr_ep *ep)
{
	struct gr_request *req;
	u32 dmactrl;

	if (list_empty(&ep->queue)) {
		ep->dma_start = 0;
		return;
	}

	req = list_first_entry(&ep->queue, struct gr_request, queue);

	/* A descriptor should already have been allocated */
	BUG_ON(!req->curr_desc);

	wmb(); /* Make sure all is settled before handing it over to DMA */

	/* Set the descriptor pointer in the hardware */
	gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

	/* Announce available descriptors */
	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

	ep->dma_start = 1;
}

/*
 * Finishes the first request in the ep's queue and, if available, starts the
 * next request in queue.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static void gr_dma_advance(struct gr_ep *ep, int status)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	gr_finish_request(ep, req, status);
	gr_start_dma(ep); /* Regardless of ep->dma_start */
}

/*
 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
 * transfer to be canceled and clears GR_DMACTRL_DA.
 *
 * Must be called with dev->lock held.
 */
static void gr_abort_dma(struct gr_ep *ep)
{
	u32 dmactrl;

	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
}

/*
 * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
 * chain.
 *
 * Size is not used for OUT endpoints. Hardware can not be instructed to handle
 * smaller buffer than MAXPL in the OUT direction.
 */
static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
			   dma_addr_t data, unsigned size, gfp_t gfp_flags)
{
	struct gr_dma_desc *desc;

	desc = gr_alloc_dma_desc(ep, gfp_flags);
	if (!desc)
		return -ENOMEM;

	desc->data = data;
	if (ep->is_in)
		desc->ctrl =
			(GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
	else
		desc->ctrl = GR_DESC_OUT_CTRL_IE;

	if (!req->first_desc) {
		req->first_desc = desc;
		req->curr_desc = desc;
	} else {
		req->last_desc->next_desc = desc;
		req->last_desc->next = desc->paddr;
		req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
	}
	req->last_desc = desc;

	return 0;
}
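
/*
 * Resulting chain layout (illustrative): first_desc -> ... -> last_desc,
 * where next_desc links the CPU's view of the chain and the next field holds
 * the DMA address of the successor for the hardware's view. curr_desc points
 * at the descriptor handed to the DMA engine next (gr_start_dma() writes its
 * paddr to dmaaddr; the OUT interrupt handler advances it).
 */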

/*
 * Sets up a chain of struct gr_dma_descriptors pointing to buffers that
 * together covers req->req.length bytes of the buffer at DMA address
 * req->req.dma for the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
				  gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left to provide descriptors for */
	u16 bytes_used; /* Bytes accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	while (bytes_left > 0) {
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		/* Should not happen however - gr_queue stops such lengths */
		if (size < ep->bytes_per_buffer)
			dev_warn(ep->dev->dev,
				 "Buffer overrun risk: %u < %u bytes/buffer\n",
				 size, ep->bytes_per_buffer);

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	}

	req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}

/*
 * Sets up a chain of struct gr_dma_descriptors pointing to buffers that
 * together covers req->req.length bytes of the buffer at DMA address
 * req->req.dma for the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware splits
 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
 * is always set to a multiple of the maximum payload (restricted to the valid
 * number of maximum payloads during high bandwidth isochronous or interrupt
 * transfers)
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one indicating that the entire request has been pushed
 * to hardware.
 */
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
				 gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left in req to provide descriptors for */
	u16 bytes_used; /* Bytes in req accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	do { /* Allow for zero length packets */
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	} while (bytes_left > 0);

	/*
	 * Send an extra zero length packet to indicate that no more data is
	 * available when req->req.zero is set and the data length is even
	 * multiples of ep->ep.maxpacket.
	 */
	if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
		ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
		if (ret)
			goto alloc_err;
	}

	/*
	 * For IN packets we only want to know when the last packet has been
	 * transmitted (not just put into internal buffers).
	 */
	req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}
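
/*
 * Example of the ZLP rule above (packet sizes illustrative): with
 * ep->ep.maxpacket == 512, a 1024-byte request with req.zero set gets an
 * extra zero-length descriptor appended so the host can tell the transfer is
 * complete, while a 1000-byte request already ends in a short packet and
 * needs no ZLP.
 */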

/* Must be called with dev->lock held */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
	struct gr_udc *dev = ep->dev;
	int ret;

	if (unlikely(!ep->ep.desc && ep->num != 0)) {
		dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
		return -EINVAL;
	}

	if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
		dev_err(dev->dev,
			"Invalid request for %s: buf=%p list_empty=%d\n",
			ep->ep.name, req->req.buf, list_empty(&req->queue));
		return -EINVAL;
	}

	/*
	 * The DMA controller can not handle smaller OUT buffers than
	 * maxpacket. It could lead to buffer overruns if unexpectedly long
	 * packets are received.
	 */
	if (!ep->is_in && (req->req.length % ep->ep.maxpacket) != 0) {
		dev_err(dev->dev,
			"OUT request length %d is not multiple of maxpacket\n",
			req->req.length);
		return -EMSGSIZE;
	}

	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		dev_err(dev->dev, "-ESHUTDOWN");
		return -ESHUTDOWN;
	}

	/* Can't touch registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND) {
		dev_err(dev->dev, "-EBUSY");
		return -EBUSY;
	}

	/* Set up DMA mapping in case the caller didn't */
	ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
	if (ret) {
		dev_err(dev->dev, "usb_gadget_map_request");
		return ret;
	}

	if (ep->is_in)
		ret = gr_setup_in_desc_list(ep, req, gfp_flags);
	else
		ret = gr_setup_out_desc_list(ep, req, gfp_flags);
	if (ret)
		return ret;

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	list_add_tail(&req->queue, &ep->queue);

	/* Start DMA if not started, otherwise interrupt handler handles it */
	if (!ep->dma_start && likely(!ep->stopped))
		gr_start_dma(ep);

	return 0;
}

/*
 * Queue a request from within the driver.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
			       gfp_t gfp_flags)
{
	if (ep->is_in)
		gr_dbgprint_request("RESP", ep, req);

	return gr_queue(ep, req, gfp_flags);
}

/* ---------------------------------------------------------------------- */
/* General helper functions */

/*
 * Dequeue ALL requests.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep_nuke(struct gr_ep *ep)
{
	struct gr_request *req;

	ep->stopped = 1;
	ep->dma_start = 0;
	gr_abort_dma(ep);

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct gr_request, queue);
		gr_finish_request(ep, req, -ESHUTDOWN);
	}
}

/*
 * Reset the hardware state of this endpoint.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep_reset(struct gr_ep *ep)
{
	gr_write32(&ep->regs->epctrl, 0);
	gr_write32(&ep->regs->dmactrl, 0);

	ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
	ep->ep.desc = NULL;
	ep->stopped = 1;
	ep->dma_start = 0;
}

/*
 * Generate STALL on ep0in/out.
 *
 * Must be called with dev->lock held.
 */
static void gr_control_stall(struct gr_udc *dev)
{
	u32 epctrl;

	epctrl = gr_read32(&dev->epo[0].regs->epctrl);
	gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
	epctrl = gr_read32(&dev->epi[0].regs->epctrl);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);

	dev->ep0state = GR_EP0_STALL;
}

/*
 * Halts, halts and wedges, or clears halt for an endpoint.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
{
	u32 epctrl;
	int retval = 0;

	if (ep->num && !ep->ep.desc)
		return -EINVAL;

	if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
		return -EOPNOTSUPP;

	/* Never actually halt ep0, and therefore never clear halt for ep0 */
	if (!ep->num) {
		if (halt && !fromhost) {
			/* ep0 halt from gadget - generate protocol stall */
			gr_control_stall(ep->dev);
			dev_dbg(ep->dev->dev, "EP: stall ep0\n");
			return 0;
		}
		return -EINVAL;
	}

	dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
		(halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);

	epctrl = gr_read32(&ep->regs->epctrl);
	if (halt) {
		/* Set HALT */
		gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
		ep->stopped = 1;
		if (wedge)
			ep->wedged = 1;
	} else {
		gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
		ep->stopped = 0;
		ep->wedged = 0;

		/* Things might have been queued up in the meantime */
		if (!ep->dma_start)
			gr_start_dma(ep);
	}

	return retval;
}

/* Must be called with dev->lock held */
static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
{
	if (dev->ep0state != value)
		dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
			 gr_ep0state_string(value));
	dev->ep0state = value;
}

/*
 * Should only be called when endpoints can not generate interrupts.
 *
 * Must be called with dev->lock held.
 */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
	gr_write32(&dev->regs->control, 0);
	wmb(); /* Make sure that we do not deny one of our interrupts */
	dev->irq_enabled = 0;
}

/*
 * Stop all device activity and disable data line pullup.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_stop_activity(struct gr_udc *dev)
{
	struct gr_ep *ep;

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_ep_nuke(ep);

	gr_disable_interrupts_and_pullup(dev);

	gr_set_ep0state(dev, GR_EP0_DISCONNECT);
	usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
}

/* ---------------------------------------------------------------------- */
/* ep0 setup packet handling */

static void gr_ep0_testmode_complete(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	u32 control;

	ep = container_of(_ep, struct gr_ep, ep);
	dev = ep->dev;

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);
}

static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
{
	/* Nothing needs to be done here */
}

/*
 * Queue a response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
			  void (*complete)(struct usb_ep *ep,
					   struct usb_request *req))
{
	u8 *reqbuf = dev->ep0reqi->req.buf;
	int status;
	int i;

	for (i = 0; i < length; i++)
		reqbuf[i] = buf[i];
	dev->ep0reqi->req.length = length;
	dev->ep0reqi->req.complete = complete;

	status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
	if (status < 0)
		dev_err(dev->dev,
			"Could not queue ep0in setup response: %d\n", status);

	return status;
}

/*
 * Queue a 2 byte response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
{
	__le16 le_response = cpu_to_le16(response);

	return gr_ep0_respond(dev, (u8 *)&le_response, 2,
			      gr_ep0_dummy_complete);
}
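
/*
 * The cpu_to_le16() above matters on this platform: USB control data is
 * little-endian on the wire, while the CPUs GRUSBDC typically runs on (e.g.
 * LEON/SPARC) are big-endian, so storing the u16 raw would swap the bytes.
 */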

/*
 * Queue a ZLP response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_empty(struct gr_udc *dev)
{
	return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
}

/*
 * This is run when a SET_ADDRESS request is received. First writes
 * the new address to the control register which is updated internally
 * when the next IN packet is ACKED.
 *
 * Must be called with dev->lock held.
 */
static void gr_set_address(struct gr_udc *dev, u8 address)
{
	u32 control;

	control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
	control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
	control |= GR_CONTROL_SU;
	gr_write32(&dev->regs->control, control);
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
			     u16 value, u16 index)
{
	u16 response;
	u8 test;

	switch (request) {
	case USB_REQ_SET_ADDRESS:
		dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
		gr_set_address(dev, value & 0xff);
		if (value)
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
		return gr_ep0_respond_empty(dev);

	case USB_REQ_GET_STATUS:
		/* Self powered | remote wakeup */
		response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
		return gr_ep0_respond_u16(dev, response);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Allow remote wakeup */
			dev->remote_wakeup = 1;
			return gr_ep0_respond_empty(dev);

		case USB_DEVICE_TEST_MODE:
			/* The hardware does not support TEST_FORCE_EN */
			test = index >> 8;
			if (test >= TEST_J && test <= TEST_PACKET) {
				dev->test_mode = test;
				return gr_ep0_respond(dev, NULL, 0,
						      gr_ep0_testmode_complete);
			}
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Disallow remote wakeup */
			dev->remote_wakeup = 0;
			return gr_ep0_respond_empty(dev);
		}
		break;
	}

	return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
				u16 value, u16 index)
{
	if (dev->gadget.state != USB_STATE_CONFIGURED)
		return -1;

	/*
	 * Should return STALL for invalid interfaces, but udc driver does not
	 * know anything about that. However, many gadget drivers do not handle
	 * GET_STATUS so we need to take care of that.
	 */

	switch (request) {
	case USB_REQ_GET_STATUS:
		return gr_ep0_respond_u16(dev, 0x0000);

	case USB_REQ_SET_FEATURE:
	case USB_REQ_CLEAR_FEATURE:
		/*
		 * No possible valid standard requests. Still let gadget drivers
		 * have a go at it.
		 */
		break;
	}

	return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index)
{
	struct gr_ep *ep;
	int status;
	int halted;
	u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
	u8 is_in = index & USB_ENDPOINT_DIR_MASK;

	if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
		return -1;

	if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
		return -1;

	ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

	switch (request) {
	case USB_REQ_GET_STATUS:
		halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
		return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			status = gr_ep_halt_wedge(ep, 1, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			if (ep->wedged)
				return -1;
			status = gr_ep_halt_wedge(ep, 0, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;
	}

	return 1; /* Delegate the rest */
}

/* Must be called with dev->lock held */
static void gr_ep0out_requeue(struct gr_udc *dev)
{
	int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);

	if (ret)
		dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
			ret);
}

/*
 * The main function dealing with setup requests on ep0.
 *
 * Must be called with dev->lock held and irqs disabled
 */
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	union {
		struct usb_ctrlrequest ctrl;
		u8 raw[8];
		u32 word[2];
	} u;
	u8 type;
	u8 request;
	u16 value;
	u16 index;
	u16 length;
	int i;
	int status;

	/* Restore from ep0 halt */
	if (dev->ep0state == GR_EP0_STALL) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (!req->req.actual)
			goto out;
	}

	if (dev->ep0state == GR_EP0_ISTATUS) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (req->req.actual > 0)
			dev_dbg(dev->dev,
				"Unexpected setup packet at state %s\n",
				gr_ep0state_string(GR_EP0_ISTATUS));
		else
			goto out; /* Got expected ZLP */
	} else if (dev->ep0state != GR_EP0_SETUP) {
		dev_info(dev->dev,
			 "Unexpected ep0out request at state %s - stalling\n",
			 gr_ep0state_string(dev->ep0state));
		gr_control_stall(dev);
		gr_set_ep0state(dev, GR_EP0_SETUP);
		goto out;
	} else if (!req->req.actual) {
		dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
			gr_ep0state_string(dev->ep0state));
		goto out;
	}

	/* Handle SETUP packet */
	for (i = 0; i < req->req.actual; i++)
		u.raw[i] = ((u8 *)req->req.buf)[i];

	type = u.ctrl.bRequestType;
	request = u.ctrl.bRequest;
	value = le16_to_cpu(u.ctrl.wValue);
	index = le16_to_cpu(u.ctrl.wIndex);
	length = le16_to_cpu(u.ctrl.wLength);

	gr_dbgprint_devreq(dev, type, request, value, index, length);

	/* Check for data stage */
	if (length) {
		if (type & USB_DIR_IN)
			gr_set_ep0state(dev, GR_EP0_IDATA);
		else
			gr_set_ep0state(dev, GR_EP0_ODATA);
	}

	status = 1; /* Positive status flags delegation */
	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (type & USB_RECIP_MASK) {
		case USB_RECIP_DEVICE:
			status = gr_device_request(dev, type, request,
						   value, index);
			break;
		case USB_RECIP_ENDPOINT:
			status = gr_endpoint_request(dev, type, request,
						     value, index);
			break;
		case USB_RECIP_INTERFACE:
			status = gr_interface_request(dev, type, request,
						      value, index);
			break;
		}
	}

	if (status > 0) {
		spin_unlock(&dev->lock);

		dev_vdbg(dev->dev, "DELEGATE\n");
		status = dev->driver->setup(&dev->gadget, &u.ctrl);

		spin_lock(&dev->lock);
	}

	/* Generate STALL on both ep0out and ep0in if requested */
	if (unlikely(status < 0)) {
		dev_vdbg(dev->dev, "STALL\n");
		gr_control_stall(dev);
	}

	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
	    request == USB_REQ_SET_CONFIGURATION) {
		if (!value) {
			dev_dbg(dev->dev, "STATUS: deconfigured\n");
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		} else if (status >= 0) {
			/* Not configured unless gadget OK:s it */
			dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
			usb_gadget_set_state(&dev->gadget,
					     USB_STATE_CONFIGURED);
		}
	}

	/* Get ready for next stage */
	if (dev->ep0state == GR_EP0_ODATA)
		gr_set_ep0state(dev, GR_EP0_OSTATUS);
	else if (dev->ep0state == GR_EP0_IDATA)
		gr_set_ep0state(dev, GR_EP0_ISTATUS);
	else
		gr_set_ep0state(dev, GR_EP0_SETUP);

out:
	gr_ep0out_requeue(dev);
}

/* ---------------------------------------------------------------------- */
/* VBUS and USB reset handling */

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_connected(struct gr_udc *dev, u32 status)
{
	u32 control;

	dev->gadget.speed = GR_SPEED(status);
	usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);

	/* Turn on full interrupts and pullup */
	control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
		   GR_CONTROL_SP | GR_CONTROL_EP);
	gr_write32(&dev->regs->control, control);
}

/* Must be called with dev->lock held */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
	u32 status;

	dev->irq_enabled = 1;
	wmb(); /* Make sure we do not ignore an interrupt */
	gr_write32(&dev->regs->control, GR_CONTROL_VI);

	/* Take care of the case we are already plugged in at this point */
	status = gr_read32(&dev->regs->status);
	if (status & GR_STATUS_VB)
		gr_vbus_connected(dev, status);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
	gr_stop_activity(dev);

	/* Report disconnect */
	if (dev->driver && dev->driver->disconnect) {
		spin_unlock(&dev->lock);

		dev->driver->disconnect(&dev->gadget);

		spin_lock(&dev->lock);
	}

	gr_enable_vbus_detect(dev);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
	gr_set_address(dev, 0);
	gr_set_ep0state(dev, GR_EP0_SETUP);
	usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
	dev->gadget.speed = GR_SPEED(status);

	gr_ep_nuke(&dev->epo[0]);
	gr_ep_nuke(&dev->epi[0]);
	dev->epo[0].stopped = 0;
	dev->epi[0].stopped = 0;
	gr_ep0out_requeue(dev);
}

/* ---------------------------------------------------------------------- */
/* Irq handling */

/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->last_desc)
		return 0;

	if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
		return 0; /* Not put in hardware buffers yet */

	if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
		return 0; /* Not transmitted yet, still in hardware buffers */

	/* Write complete */
	gr_dma_advance(ep, 0);

	return 1;
}

/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
	u32 ep_dmactrl;
	u32 ctrl;
	u16 len;
	struct gr_request *req;
	struct gr_udc *dev = ep->dev;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->curr_desc)
		return 0;

	ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
	if (ctrl & GR_DESC_OUT_CTRL_EN)
		return 0; /* Not received yet */

	/* Read complete */
	len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
	req->req.actual += len;
	if (ctrl & GR_DESC_OUT_CTRL_SE)
		req->setup = 1;

	if (len < ep->ep.maxpacket || req->req.actual == req->req.length) {
		/* Short packet or the expected size - we are done */

		if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
			/*
			 * Send a status stage ZLP to ack the DATA stage in the
			 * OUT direction. This needs to be done before
			 * gr_dma_advance as that can lead to a call to
			 * ep0_setup that can change dev->ep0state.
			 */
			gr_ep0_respond_empty(dev);
			gr_set_ep0state(dev, GR_EP0_SETUP);
		}

		gr_dma_advance(ep, 0);
	} else {
		/* Not done yet. Enable the next descriptor to receive more. */
		req->curr_desc = req->curr_desc->next_desc;
		req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

		ep_dmactrl = gr_read32(&ep->regs->dmactrl);
		gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
	}

	return 1;
}

/*
 * Handle state changes. Returns whether something was handled.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
	u32 status = gr_read32(&dev->regs->status);
	int handled = 0;
	int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
			 dev->gadget.state == USB_STATE_ATTACHED);

	/* VBUS valid detected */
	if (!powstate && (status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
		gr_vbus_connected(dev, status);
		handled = 1;
	}

	/* Disconnect */
	if (powstate && !(status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
		gr_vbus_disconnected(dev);
		handled = 1;
	}

	/* USB reset detected */
	if (status & GR_STATUS_UR) {
		dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
			GR_SPEED_STR(status));
		gr_write32(&dev->regs->status, GR_STATUS_UR);
		gr_udc_usbreset(dev, status);
		handled = 1;
	}

	/* Speed change */
	if (dev->gadget.speed != GR_SPEED(status)) {
		dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
			GR_SPEED_STR(status));
		dev->gadget.speed = GR_SPEED(status);
		handled = 1;
	}

	/* Going into suspend */
	if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB suspend\n");
		gr_set_ep0state(dev, GR_EP0_SUSPEND);
		dev->suspended_from = dev->gadget.state;
		usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->suspend) {
			spin_unlock(&dev->lock);

			dev->driver->suspend(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	/* Coming out of suspend */
	if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB resume\n");
		if (dev->suspended_from == USB_STATE_POWERED)
			gr_set_ep0state(dev, GR_EP0_DISCONNECT);
		else
			gr_set_ep0state(dev, GR_EP0_SETUP);
		usb_gadget_set_state(&dev->gadget, dev->suspended_from);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->resume) {
			spin_unlock(&dev->lock);

			dev->driver->resume(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	return handled;
}

/* Non-interrupt context irq handler */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;
	struct gr_ep *ep;
	int handled = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	if (!dev->irq_enabled)
		goto out;

	/*
	 * Check IN ep interrupts. We check these before the OUT eps because
	 * some gadgets reuse the request that might already be currently
	 * outstanding and needs to be completed (mainly setup requests).
	 */
	for (i = 0; i < dev->nepi; i++) {
		ep = &dev->epi[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_in_ep(ep) || handled;
	}

	/* Check OUT ep interrupts */
	for (i = 0; i < dev->nepo; i++) {
		ep = &dev->epo[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_out_ep(ep) || handled;
	}

	/* Check status interrupts */
	handled = gr_handle_state_changes(dev) || handled;

	/*
	 * Check AMBA DMA errors. Only check if we didn't find anything else to
	 * handle because this shouldn't happen if we did everything right.
	 */
	if (!handled) {
		list_for_each_entry(ep, &dev->ep_list, ep_list) {
			if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
				dev_err(dev->dev,
					"AMBA Error occurred for %s\n",
					ep->ep.name);
				handled = 1;
			}
		}
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

/* Interrupt context irq handler */
static irqreturn_t gr_irq(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;

	if (!dev->irq_enabled)
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}
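
/*
 * gr_irq() runs in hard irq context and merely wakes the thread that runs
 * gr_irq_handler(). A probe routine (not shown in this excerpt) would
 * register the pair roughly like this; the irq field name and exact flags
 * are assumptions here:
 *
 *	err = devm_request_threaded_irq(dev->dev, dev->irq, gr_irq,
 *					gr_irq_handler, IRQF_SHARED,
 *					driver_name, dev);
 */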

/* ---------------------------------------------------------------------- */
/* USB ep ops */

/* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_enable(struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc)
{
	struct gr_udc *dev;
	struct gr_ep *ep;
	u8 mode;
	u8 nt;
	u16 max;
	u16 buffer_size = 0;
	u32 epctrl;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* Make sure we are clear for enabling */
	epctrl = gr_read32(&ep->regs->epctrl);
	if (epctrl & GR_EPCTRL_EV)
		return -EBUSY;

	/* Check that directions match */
	if (!ep->is_in != !usb_endpoint_dir_in(desc))
		return -EINVAL;

	/* Check ep num */
	if ((!ep->is_in && ep->num >= dev->nepo) ||
	    (ep->is_in && ep->num >= dev->nepi))
		return -EINVAL;

	if (usb_endpoint_xfer_control(desc)) {
		mode = 0;
	} else if (usb_endpoint_xfer_isoc(desc)) {
		mode = 1;
	} else if (usb_endpoint_xfer_bulk(desc)) {
		mode = 2;
	} else if (usb_endpoint_xfer_int(desc)) {
		mode = 3;
	} else {
		dev_err(dev->dev, "Unknown transfer type for %s\n",
			ep->ep.name);
		return -EINVAL;
	}

	/*
	 * Bits 10-0 set the max payload. 12-11 set the number of
	 * additional transactions.
	 */
	max = 0x7ff & usb_endpoint_maxp(desc);
	nt = 0x3 & (usb_endpoint_maxp(desc) >> 11);
	buffer_size = GR_BUFFER_SIZE(epctrl);
	if (nt && (mode == 0 || mode == 2)) {
		dev_err(dev->dev,
			"%s mode: multiple trans./microframe not valid\n",
			(mode == 2 ? "Bulk" : "Control"));
		return -EINVAL;
	} else if (nt == 0x3) {
		dev_err(dev->dev, "Invalid value for trans./microframe\n");
		return -EINVAL;
	} else if ((nt + 1) * max > buffer_size) {
		dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
			buffer_size, (nt + 1), max);
		return -EINVAL;
	} else if (max == 0) {
		dev_err(dev->dev, "Max payload cannot be set to 0\n");
		return -EINVAL;
	}

	spin_lock(&ep->dev->lock);

	if (!ep->stopped) {
		spin_unlock(&ep->dev->lock);
		return -EBUSY;
	}

	ep->stopped = 0;
	ep->wedged = 0;
	ep->ep.desc = desc;
	ep->ep.maxpacket = max;
	ep->dma_start = 0;

	if (nt) {
		/*
		 * Maximum possible size of all payloads in one microframe
		 * regardless of direction when using high-bandwidth mode.
		 */
		ep->bytes_per_buffer = (nt + 1) * max;
	} else if (ep->is_in) {
		/*
		 * The biggest multiple of maximum packet size that fits into
		 * the buffer. The hardware will split up into many packets in
		 * the IN direction.
		 */
		ep->bytes_per_buffer = (buffer_size / max) * max;
	} else {
		/*
		 * Only single packets will be placed in the buffers in the OUT
		 * direction.
		 */
		ep->bytes_per_buffer = max;
	}

	epctrl = (max << GR_EPCTRL_MAXPL_POS)
		| (nt << GR_EPCTRL_NT_POS)
		| (mode << GR_EPCTRL_TT_POS)
		| GR_EPCTRL_EV;
	if (ep->is_in)
		epctrl |= GR_EPCTRL_PI;
	gr_write32(&ep->regs->epctrl, epctrl);

	gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);

	spin_unlock(&ep->dev->lock);

	dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
		ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
	return 0;
}

/* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_disable(struct usb_ep *_ep)
{
        struct gr_ep *ep;
        struct gr_udc *dev;
        unsigned long flags;

        ep = container_of(_ep, struct gr_ep, ep);
        if (!_ep || !ep->ep.desc)
                return -ENODEV;

        dev = ep->dev;

        /* 'ep0' IN and OUT are reserved */
        if (ep == &dev->epo[0] || ep == &dev->epi[0])
                return -EINVAL;

        if (dev->ep0state == GR_EP0_SUSPEND)
                return -EBUSY;

        dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);

        spin_lock_irqsave(&dev->lock, flags);

        gr_ep_nuke(ep);
        gr_ep_reset(ep);
        ep->ep.desc = NULL;

        spin_unlock_irqrestore(&dev->lock, flags);

        return 0;
}

/*
 * Frees a request, but not any DMA buffers associated with it
 * (gr_finish_request should already have taken care of that).
 */
static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
        struct gr_request *req;

        if (!_ep || !_req)
                return;
        req = container_of(_req, struct gr_request, req);

        /* Freeing a request that is still queued would leak it */
        WARN(!list_empty(&req->queue),
             "request not dequeued properly before freeing\n");

        kfree(req);
}

/* Queue a request from the gadget */
static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
                        gfp_t gfp_flags)
{
        struct gr_ep *ep;
        struct gr_request *req;
        struct gr_udc *dev;
        int ret;

        if (unlikely(!_ep || !_req))
                return -EINVAL;

        ep = container_of(_ep, struct gr_ep, ep);
        req = container_of(_req, struct gr_request, req);
        dev = ep->dev;

        spin_lock(&ep->dev->lock);

        /*
         * The ep0 pointer in the gadget struct is used both for ep0in and
         * ep0out. In a data stage in the out direction ep0out needs to be
         * used instead of the default ep0in. Completion functions might use
         * driver_data, so that needs to be copied as well.
         */
        if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
                ep = &dev->epo[0];
                ep->ep.driver_data = dev->epi[0].ep.driver_data;
        }

        if (ep->is_in)
                gr_dbgprint_request("EXTERN", ep, req);

        ret = gr_queue(ep, req, gfp_flags);

        spin_unlock(&ep->dev->lock);

        return ret;
}

/* Dequeue JUST ONE request */
static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
        struct gr_request *req;
        struct gr_ep *ep;
        struct gr_udc *dev;
        int ret = 0;
        unsigned long flags;

        ep = container_of(_ep, struct gr_ep, ep);
        if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
                return -EINVAL;
        dev = ep->dev;
        if (!dev->driver)
                return -ESHUTDOWN;

        /* We can't touch (DMA) registers when suspended */
        if (dev->ep0state == GR_EP0_SUSPEND)
                return -EBUSY;

        spin_lock_irqsave(&dev->lock, flags);

        /* Make sure it's actually queued on this endpoint */
        list_for_each_entry(req, &ep->queue, queue) {
                if (&req->req == _req)
                        break;
        }
        if (&req->req != _req) {
                ret = -EINVAL;
                goto out;
        }

        if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
                /* This request is currently being processed */
                gr_abort_dma(ep);
                if (ep->stopped)
                        gr_finish_request(ep, req, -ECONNRESET);
                else
                        gr_dma_advance(ep, -ECONNRESET);
        } else if (!list_empty(&req->queue)) {
                /* Not being processed - gr_finish_request dequeues it */
                gr_finish_request(ep, req, -ECONNRESET);
        } else {
                ret = -EOPNOTSUPP;
        }

out:
        spin_unlock_irqrestore(&dev->lock, flags);

        return ret;
}

/* Helper for gr_set_halt and gr_set_wedge */
static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
        int ret;
        struct gr_ep *ep;

        if (!_ep)
                return -ENODEV;
        ep = container_of(_ep, struct gr_ep, ep);

        spin_lock(&ep->dev->lock);

        /* Halting an IN endpoint should fail if queue is not empty */
        if (halt && ep->is_in && !list_empty(&ep->queue)) {
                ret = -EAGAIN;
                goto out;
        }
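
        /* Final argument 0: not initiated by the host (assumed meaning of
         * gr_ep_halt_wedge's last parameter, defined earlier in this file).
         */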
        ret = gr_ep_halt_wedge(ep, halt, wedge, 0);

out:
        spin_unlock(&ep->dev->lock);

        return ret;
}

/* Halt endpoint */
static int gr_set_halt(struct usb_ep *_ep, int halt)
{
        return gr_set_halt_wedge(_ep, halt, 0);
}

/* Halt and wedge endpoint */
static int gr_set_wedge(struct usb_ep *_ep)
{
        return gr_set_halt_wedge(_ep, 1, 1);
}

/*
 * Return the total number of bytes currently stored in the internal buffers of
 * the endpoint.
 */
static int gr_fifo_status(struct usb_ep *_ep)
{
        struct gr_ep *ep;
        u32 epstat;
        u32 bytes = 0;

        if (!_ep)
                return -ENODEV;
        ep = container_of(_ep, struct gr_ep, ep);

        epstat = gr_read32(&ep->regs->epstat);
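
        /*
         * Each endpoint appears to be double-buffered in hardware: B0/B1
         * indicate which of the two buffers currently hold data, and
         * B0CNT/B1CNT give the byte count in each.
         */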
        if (epstat & GR_EPSTAT_B0)
                bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
        if (epstat & GR_EPSTAT_B1)
                bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;

        return bytes;
}

/* Empty data from internal buffers of an endpoint. */
static void gr_fifo_flush(struct usb_ep *_ep)
{
        struct gr_ep *ep;
        u32 epctrl;

        if (!_ep)
                return;
        ep = container_of(_ep, struct gr_ep, ep);
        dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);

        spin_lock(&ep->dev->lock);
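
        /*
         * Setting the CB bit makes the core discard whatever is held in the
         * endpoint's hardware buffers ("clear buffers" is the assumed
         * meaning of the bit name).
         */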
        epctrl = gr_read32(&ep->regs->epctrl);
        epctrl |= GR_EPCTRL_CB;
        gr_write32(&ep->regs->epctrl, epctrl);

        spin_unlock(&ep->dev->lock);
}

static struct usb_ep_ops gr_ep_ops = {
        .enable         = gr_ep_enable,
        .disable        = gr_ep_disable,

        .alloc_request  = gr_alloc_request,
        .free_request   = gr_free_request,

        .queue          = gr_queue_ext,
        .dequeue        = gr_dequeue,

        .set_halt       = gr_set_halt,
        .set_wedge      = gr_set_wedge,
        .fifo_status    = gr_fifo_status,
        .fifo_flush     = gr_fifo_flush,
};

/* ---------------------------------------------------------------------- */
/* USB Gadget ops */

static int gr_get_frame(struct usb_gadget *_gadget)
{
        struct gr_udc *dev;

        if (!_gadget)
                return -ENODEV;
        dev = container_of(_gadget, struct gr_udc, gadget);
        return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
}

static int gr_wakeup(struct usb_gadget *_gadget)
{
        struct gr_udc *dev;

        if (!_gadget)
                return -ENODEV;
        dev = container_of(_gadget, struct gr_udc, gadget);

        /* Remote wakeup feature not enabled by host */
        if (!dev->remote_wakeup)
                return -EINVAL;
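
        /* Signal remote wakeup by setting the RW bit in the control register */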
        spin_lock(&dev->lock);
        gr_write32(&dev->regs->control,
                   gr_read32(&dev->regs->control) | GR_CONTROL_RW);
        spin_unlock(&dev->lock);

        return 0;
}

static int gr_pullup(struct usb_gadget *_gadget, int is_on)
{
        struct gr_udc *dev;
        u32 control;

        if (!_gadget)
                return -ENODEV;
        dev = container_of(_gadget, struct gr_udc, gadget);
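
        /*
         * The EP bit in the control register controls the pullup: setting it
         * makes the device visible on the bus, clearing it disconnects.
         */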
        spin_lock(&dev->lock);
        control = gr_read32(&dev->regs->control);
        if (is_on)
                control |= GR_CONTROL_EP;
        else
                control &= ~GR_CONTROL_EP;
        gr_write32(&dev->regs->control, control);
        spin_unlock(&dev->lock);

        return 0;
}

static int gr_udc_start(struct usb_gadget *gadget,
                        struct usb_gadget_driver *driver)
{
        struct gr_udc *dev = to_gr_udc(gadget);

        spin_lock(&dev->lock);

        /* Hook up the driver */
        driver->driver.bus = NULL;
        dev->driver = driver;

        /* Get ready for host detection */
        gr_enable_vbus_detect(dev);

        spin_unlock(&dev->lock);

        dev_info(dev->dev, "Started with gadget driver '%s'\n",
                 driver->driver.name);

        return 0;
}

static int gr_udc_stop(struct usb_gadget *gadget,
                       struct usb_gadget_driver *driver)
{
        struct gr_udc *dev = to_gr_udc(gadget);
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);

        dev->driver = NULL;
        gr_stop_activity(dev);

        spin_unlock_irqrestore(&dev->lock, flags);

        dev_info(dev->dev, "Stopped\n");

        return 0;
}

static const struct usb_gadget_ops gr_ops = {
        .get_frame      = gr_get_frame,
        .wakeup         = gr_wakeup,
        .pullup         = gr_pullup,
        .udc_start      = gr_udc_start,
        .udc_stop       = gr_udc_stop,
        /* Other operations not supported */
};

/* ---------------------------------------------------------------------- */
/* Module probe, removal and of-matching */

static const char * const onames[] = {
        "ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
        "ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
        "ep12out", "ep13out", "ep14out", "ep15out"
};

static const char * const inames[] = {
        "ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
        "ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
        "ep12in", "ep13in", "ep14in", "ep15in"
};

/* Must be called with dev->lock held */
static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
{
        struct gr_ep *ep;
        struct gr_request *req;
        struct usb_request *_req;
        void *buf;

        if (is_in) {
                ep = &dev->epi[num];
                ep->ep.name = inames[num];
                ep->regs = &dev->regs->epi[num];
        } else {
                ep = &dev->epo[num];
                ep->ep.name = onames[num];
                ep->regs = &dev->regs->epo[num];
        }

        gr_ep_reset(ep);
        ep->num = num;
        ep->is_in = is_in;
        ep->dev = dev;
        ep->ep.ops = &gr_ep_ops;
        INIT_LIST_HEAD(&ep->queue);
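
        /*
         * ep0 gets a permanently allocated request and a DMA-capable bounce
         * buffer for control transfers; requests for all other endpoints come
         * from the gadget driver.
         */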
        if (num == 0) {
                _req = gr_alloc_request(&ep->ep, GFP_KERNEL);
                buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_KERNEL);
                if (!_req || !buf) {
                        /* possible _req freed by gr_probe via gr_remove */
                        return -ENOMEM;
                }

                req = container_of(_req, struct gr_request, req);
                req->req.buf = buf;
                req->req.length = MAX_CTRL_PL_SIZE;

                if (is_in)
                        dev->ep0reqi = req; /* Complete callback set when used */
                else
                        dev->ep0reqo = req; /* Completion treated separately */

                usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
                ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;
        } else {
                usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
                list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
        }
        list_add_tail(&ep->ep_list, &dev->ep_list);

        return 0;
}

/* Must be called with dev->lock held */
static int gr_udc_init(struct gr_udc *dev)
{
        struct device_node *np = dev->dev->of_node;
        u32 epctrl_val;
        u32 dmactrl_val;
        int i;
        int ret = 0;
        u32 *bufsizes;
        u32 bufsize;
        int len;

        gr_set_address(dev, 0);

        INIT_LIST_HEAD(&dev->gadget.ep_list);
        dev->gadget.speed = USB_SPEED_UNKNOWN;
        dev->gadget.ep0 = &dev->epi[0].ep;

        INIT_LIST_HEAD(&dev->ep_list);
        gr_set_ep0state(dev, GR_EP0_DISCONNECT);
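
        /*
         * Per-endpoint buffer sizes can be configured through the optional
         * "epobufsizes"/"epibufsizes" device-tree properties; endpoints
         * without an entry fall back to 1024 bytes.
         */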
        bufsizes = (u32 *)of_get_property(np, "epobufsizes", &len);
        len /= sizeof(u32);
        for (i = 0; i < dev->nepo; i++) {
                bufsize = (bufsizes && i < len) ? bufsizes[i] : 1024;
                ret = gr_ep_init(dev, i, 0, bufsize);
                if (ret)
                        return ret;
        }

        bufsizes = (u32 *)of_get_property(np, "epibufsizes", &len);
        len /= sizeof(u32);
        for (i = 0; i < dev->nepi; i++) {
                bufsize = (bufsizes && i < len) ? bufsizes[i] : 1024;
                ret = gr_ep_init(dev, i, 1, bufsize);
                if (ret)
                        return ret;
        }

        /* Must be disabled by default */
        dev->remote_wakeup = 0;

        /* Enable ep0out and ep0in */
        epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
        dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
        gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
        gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
        gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
        gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);

        return 0;
}

static int gr_remove(struct platform_device *ofdev)
{
        struct gr_udc *dev = dev_get_drvdata(&ofdev->dev);

        if (dev->added)
                usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
        if (dev->driver)
                return -EBUSY;

        gr_dfs_delete(dev);
        if (dev->desc_pool)
                dma_pool_destroy(dev->desc_pool);
        dev_set_drvdata(&ofdev->dev, NULL);

        gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
        gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);

        return 0;
}
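
/*
 * gr_irq runs as the hard interrupt handler and gr_irq_handler as its
 * threaded counterpart, so the bulk of the event processing can run with
 * interrupts enabled.
 */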
static int gr_request_irq(struct gr_udc *dev, int irq)
{
        return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
                                         IRQF_SHARED, driver_name, dev);
}

static int gr_probe(struct platform_device *ofdev)
{
        struct gr_udc *dev;
        struct resource *res;
        struct gr_regs __iomem *regs;
        int retval;
        u32 status;

        dev = devm_kzalloc(&ofdev->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
        dev->dev = &ofdev->dev;

        res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
        regs = devm_ioremap_resource(dev->dev, res);
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        dev->irq = irq_of_parse_and_map(dev->dev->of_node, 0);
        if (!dev->irq) {
                dev_err(dev->dev, "No irq found\n");
                return -ENODEV;
        }

        /* Some core configurations have separate irqs for IN and OUT events */
        dev->irqi = irq_of_parse_and_map(dev->dev->of_node, 1);
        if (dev->irqi) {
                dev->irqo = irq_of_parse_and_map(dev->dev->of_node, 2);
                if (!dev->irqo) {
                        dev_err(dev->dev, "Found irqi but not irqo\n");
                        return -ENODEV;
                }
        }

        dev->gadget.name = driver_name;
        dev->gadget.max_speed = USB_SPEED_HIGH;
        dev->gadget.ops = &gr_ops;
        dev->gadget.quirk_ep_out_aligned_size = true;

        spin_lock_init(&dev->lock);
        dev->regs = regs;

        dev_set_drvdata(&ofdev->dev, dev);

        /* Determine number of endpoints and data interface mode */
        status = gr_read32(&dev->regs->status);
        dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
        dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;
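
        /*
         * The NEPI/NEPO status fields encode the number of IN/OUT endpoints
         * minus one, hence the + 1 above.
         */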
        if (!(status & GR_STATUS_DM)) {
                dev_err(dev->dev, "Slave mode cores are not supported\n");
                return -ENODEV;
        }

        /* --- Effects of the following calls might need explicit cleanup --- */

        /* Create DMA pool for descriptors */
        dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
                                         sizeof(struct gr_dma_desc), 4, 0);
        if (!dev->desc_pool) {
                dev_err(dev->dev, "Could not allocate DMA pool\n");
                return -ENOMEM;
        }

        spin_lock(&dev->lock);

        /* Inside lock so that no gadget can use this udc until probe is done */
        retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
        if (retval) {
                dev_err(dev->dev, "Could not add gadget udc\n");
                goto out;
        }
        dev->added = 1;

        retval = gr_udc_init(dev);
        if (retval)
                goto out;

        gr_dfs_create(dev);

        /* Clear all interrupt enables that might be left on since last boot */
        gr_disable_interrupts_and_pullup(dev);

        retval = gr_request_irq(dev, dev->irq);
        if (retval) {
                dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
                goto out;
        }

        if (dev->irqi) {
                retval = gr_request_irq(dev, dev->irqi);
                if (retval) {
                        dev_err(dev->dev, "Failed to request irqi %d\n",
                                dev->irqi);
                        goto out;
                }
                retval = gr_request_irq(dev, dev->irqo);
                if (retval) {
                        dev_err(dev->dev, "Failed to request irqo %d\n",
                                dev->irqo);
                        goto out;
                }
        }

        if (dev->irqi)
                dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
                         dev->irq, dev->irqi, dev->irqo);
        else
                dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);

out:
        spin_unlock(&dev->lock);

        if (retval)
                gr_remove(ofdev);

        return retval;
}

static struct of_device_id gr_match[] = {
        {.name = "GAISLER_USBDC"},
        {.name = "01_021"},
        {},
};
MODULE_DEVICE_TABLE(of, gr_match);

static struct platform_driver gr_driver = {
        .driver = {
                .name = DRIVER_NAME,
                .owner = THIS_MODULE,
                .of_match_table = gr_match,
        },
        .probe = gr_probe,
        .remove = gr_remove,
};
module_platform_driver(gr_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");