/*
 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
 *
 * 2013 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRUSBDC USB Device Controller cores available in the
 * GRLIB VHDL IP core library.
 *
 * Full documentation of the GRUSBDC core can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Contributors:
 * - Andreas Larsson <andreas@gaisler.com>
 * - Marko Isomaki
 */

/*
 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints, each
 * individually configurable to any of the four USB transfer types. This driver
 * only supports cores in DMA mode.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <asm/byteorder.h>

#include "gr_udc.h"

#define DRIVER_NAME "gr_udc"
#define DRIVER_DESC "Aeroflex Gaisler GRUSBDC USB Peripheral Controller"

static const char driver_name[] = DRIVER_NAME;
static const char driver_desc[] = DRIVER_DESC;

#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))

/* USB speed and corresponding string calculated from status register value */
#define GR_SPEED(status) \
        ((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))

/* Size of hardware buffer calculated from epctrl register value */
#define GR_BUFFER_SIZE(epctrl)                                        \
        ((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
         GR_EPCTRL_BUFSZ_SCALER)

/* ---------------------------------------------------------------------- */
/* Debug printout functionality */

static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};

static const char *gr_ep0state_string(enum gr_ep0state state)
{
        static const char *const names[] = {
                [GR_EP0_DISCONNECT] = "disconnect",
                [GR_EP0_SETUP] = "setup",
                [GR_EP0_IDATA] = "idata",
                [GR_EP0_ODATA] = "odata",
                [GR_EP0_ISTATUS] = "istatus",
                [GR_EP0_OSTATUS] = "ostatus",
                [GR_EP0_STALL] = "stall",
                [GR_EP0_SUSPEND] = "suspend",
        };

        if (state < 0 || state >= ARRAY_SIZE(names))
                return "UNKNOWN";

        return names[state];
}

#ifdef VERBOSE_DEBUG

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
                                struct gr_request *req)
{
        int buflen = ep->is_in ? req->req.length : req->req.actual;
        int rowlen = 32;
        int plen = min(rowlen, buflen);

        dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
                (buflen > plen ? " (truncated)" : ""));
        print_hex_dump_debug("   ", DUMP_PREFIX_NONE,
                             rowlen, 4, req->req.buf, plen, false);
}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
                               u16 value, u16 index, u16 length)
{
        dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
                 type, request, value, index, length);
}
#else /* !VERBOSE_DEBUG */

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
                                struct gr_request *req) {}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
                               u16 value, u16 index, u16 length) {}

#endif /* VERBOSE_DEBUG */

/* ---------------------------------------------------------------------- */
/* Debugfs functionality */

#ifdef CONFIG_USB_GADGET_DEBUG_FS

static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
        u32 epctrl = gr_read32(&ep->regs->epctrl);
        u32 epstat = gr_read32(&ep->regs->epstat);
        int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
        struct gr_request *req;

        seq_printf(seq, "%s:\n", ep->ep.name);
        seq_printf(seq, "  mode = %s\n", gr_modestring[mode]);
        seq_printf(seq, "  halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
        seq_printf(seq, "  disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
        seq_printf(seq, "  valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
        seq_printf(seq, "  dma_start = %d\n", ep->dma_start);
        seq_printf(seq, "  stopped = %d\n", ep->stopped);
        seq_printf(seq, "  wedged = %d\n", ep->wedged);
        seq_printf(seq, "  callback = %d\n", ep->callback);
        seq_printf(seq, "  maxpacket = %d\n", ep->ep.maxpacket);
        seq_printf(seq, "  maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
        seq_printf(seq, "  bytes_per_buffer = %d\n", ep->bytes_per_buffer);
        if (mode == 1 || mode == 3)
                seq_printf(seq, "  nt = %d\n",
                           (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);

        seq_printf(seq, "  Buffer 0: %s %s%d\n",
                   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
                   epstat & GR_EPSTAT_BS ? " " : "selected ",
                   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
        seq_printf(seq, "  Buffer 1: %s %s%d\n",
                   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
                   epstat & GR_EPSTAT_BS ? "selected " : " ",
                   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);

        if (list_empty(&ep->queue)) {
                seq_puts(seq, "  Queue: empty\n\n");
                return;
        }

        seq_puts(seq, "  Queue:\n");
        list_for_each_entry(req, &ep->queue, queue) {
                struct gr_dma_desc *desc;
                struct gr_dma_desc *next;

                seq_printf(seq, "    0x%p: 0x%p %d %d\n", req,
                           &req->req.buf, req->req.actual, req->req.length);

                next = req->first_desc;
                do {
                        desc = next;
                        next = desc->next_desc;
                        seq_printf(seq, "    %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
                                   desc == req->curr_desc ? 'c' : ' ',
                                   desc, desc->paddr, desc->ctrl, desc->data);
                } while (desc != req->last_desc);
        }
        seq_puts(seq, "\n");
}

static int gr_seq_show(struct seq_file *seq, void *v)
{
        struct gr_udc *dev = seq->private;
        u32 control = gr_read32(&dev->regs->control);
        u32 status = gr_read32(&dev->regs->status);
        struct gr_ep *ep;

        seq_printf(seq, "usb state = %s\n",
                   usb_state_string(dev->gadget.state));
        seq_printf(seq, "address = %d\n",
                   (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
        seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
        seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
        seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
        seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
        seq_printf(seq, "test_mode = %d\n", dev->test_mode);
        seq_puts(seq, "\n");

        list_for_each_entry(ep, &dev->ep_list, ep_list)
                gr_seq_ep_show(seq, ep);

        return 0;
}

static int gr_dfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, gr_seq_show, inode->i_private);
}

static const struct file_operations gr_dfs_fops = {
        .owner          = THIS_MODULE,
        .open           = gr_dfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
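
/*
 * Note: with the directory created below, the state file should show up at
 * <debugfs mount>/<device name>/gr_udc_state (path inferred from the code,
 * not from core documentation).
 */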
static void gr_dfs_create(struct gr_udc *dev)
{
        const char *name = "gr_udc_state";

        dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
        dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root, dev,
                                             &gr_dfs_fops);
}

static void gr_dfs_delete(struct gr_udc *dev)
{
        /* Handles NULL and ERR pointers internally */
        debugfs_remove(dev->dfs_state);
        debugfs_remove(dev->dfs_root);
}

#else /* !CONFIG_USB_GADGET_DEBUG_FS */

static void gr_dfs_create(struct gr_udc *dev) {}
static void gr_dfs_delete(struct gr_udc *dev) {}

#endif /* CONFIG_USB_GADGET_DEBUG_FS */

/* ---------------------------------------------------------------------- */
/* DMA and request handling */

/* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
{
        dma_addr_t paddr;
        struct gr_dma_desc *dma_desc;

        dma_desc = dma_pool_alloc(ep->dev->desc_pool, gfp_flags, &paddr);
        if (!dma_desc) {
                dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
                return NULL;
        }

        memset(dma_desc, 0, sizeof(*dma_desc));
        dma_desc->paddr = paddr;

        return dma_desc;
}
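
/* Returns a descriptor to the DMA pool, using its stored bus address */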
static inline void gr_free_dma_desc(struct gr_udc *dev,
                                    struct gr_dma_desc *desc)
{
        dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}

/* Frees the chain of struct gr_dma_desc for the given request */
static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
{
        struct gr_dma_desc *desc;
        struct gr_dma_desc *next;

        next = req->first_desc;
        if (!next)
                return;

        do {
                desc = next;
                next = desc->next_desc;
                gr_free_dma_desc(dev, desc);
        } while (desc != req->last_desc);

        req->first_desc = NULL;
        req->curr_desc = NULL;
        req->last_desc = NULL;
}

static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);

/*
 * Frees allocated resources and calls the appropriate completion function/setup
 * packet handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
                              int status)
        __releases(&dev->lock)
        __acquires(&dev->lock)
{
        struct gr_udc *dev;

        list_del_init(&req->queue);

        if (likely(req->req.status == -EINPROGRESS))
                req->req.status = status;
        else
                status = req->req.status;

        dev = ep->dev;
        usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
        gr_free_dma_desc_chain(dev, req);

        if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
                req->req.actual = req->req.length;
        } else if (req->oddlen && req->req.actual > req->evenlen) {
                /*
                 * Copy to user buffer in this case where length was not evenly
                 * divisible by ep->ep.maxpacket and the last descriptor was
                 * actually used.
                 */
                char *buftail = ((char *)req->req.buf + req->evenlen);

                memcpy(buftail, ep->tailbuf, req->oddlen);

                if (req->req.actual > req->req.length) {
                        /* We got more data than was requested */
                        dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
                                ep->ep.name);
                        gr_dbgprint_request("OVFL", ep, req);
                        req->req.status = -EOVERFLOW;
                }
        }

        if (!status) {
                if (ep->is_in)
                        gr_dbgprint_request("SENT", ep, req);
                else
                        gr_dbgprint_request("RECV", ep, req);
        }

        /* Prevent changes to ep->queue during callback */
        ep->callback = 1;
        if (req == dev->ep0reqo && !status) {
                if (req->setup)
                        gr_ep0_setup(dev, req);
                else
                        dev_err(dev->dev,
                                "Unexpected non setup packet on ep0in\n");
        } else if (req->req.complete) {
                spin_unlock(&dev->lock);

                usb_gadget_giveback_request(&ep->ep, &req->req);

                spin_lock(&dev->lock);
        }
        ep->callback = 0;
}
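
/* usb_ep_ops callback: allocates a zeroed gr_request wrapping a usb_request */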
static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
        struct gr_request *req;

        req = kzalloc(sizeof(*req), gfp_flags);
        if (!req)
                return NULL;

        INIT_LIST_HEAD(&req->queue);

        return &req->req;
}

/*
 * Starts DMA for endpoint ep if there are requests in the queue.
 *
 * Must be called with dev->lock held and with !ep->stopped.
 */
static void gr_start_dma(struct gr_ep *ep)
{
        struct gr_request *req;
        u32 dmactrl;

        if (list_empty(&ep->queue)) {
                ep->dma_start = 0;
                return;
        }

        req = list_first_entry(&ep->queue, struct gr_request, queue);

        /* A descriptor should already have been allocated */
        BUG_ON(!req->curr_desc);

        /*
         * The DMA controller can not handle smaller OUT buffers than
         * ep->ep.maxpacket. It could lead to buffer overruns if an
         * unexpectedly long packet is received. Therefore an internal bounce
         * buffer gets used when such a request gets enabled.
         */
        if (!ep->is_in && req->oddlen)
                req->last_desc->data = ep->tailbuf_paddr;

        wmb(); /* Make sure all is settled before handing it over to DMA */

        /* Set the descriptor pointer in the hardware */
        gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

        /* Announce available descriptors */
        dmactrl = gr_read32(&ep->regs->dmactrl);
        gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

        ep->dma_start = 1;
}

/*
 * Finishes the first request in the ep's queue and, if available, starts the
 * next request in queue.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static void gr_dma_advance(struct gr_ep *ep, int status)
{
        struct gr_request *req;

        req = list_first_entry(&ep->queue, struct gr_request, queue);
        gr_finish_request(ep, req, status);
        gr_start_dma(ep); /* Regardless of ep->dma_start */
}

/*
 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
 * transfer to be canceled and clears GR_DMACTRL_DA.
 *
 * Must be called with dev->lock held.
 */
static void gr_abort_dma(struct gr_ep *ep)
{
        u32 dmactrl;

        dmactrl = gr_read32(&ep->regs->dmactrl);
        gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
}

/*
 * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
 * chain.
 *
 * Size is not used for OUT endpoints. Hardware can not be instructed to handle
 * a smaller buffer than MAXPL in the OUT direction.
 */
static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
                           dma_addr_t data, unsigned size, gfp_t gfp_flags)
{
        struct gr_dma_desc *desc;

        desc = gr_alloc_dma_desc(ep, gfp_flags);
        if (!desc)
                return -ENOMEM;

        desc->data = data;
        if (ep->is_in)
                desc->ctrl =
                        (GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
        else
                desc->ctrl = GR_DESC_OUT_CTRL_IE;

        if (!req->first_desc) {
                req->first_desc = desc;
                req->curr_desc = desc;
        } else {
                req->last_desc->next_desc = desc;
                req->last_desc->next = desc->paddr;
                req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
        }
        req->last_desc = desc;

        return 0;
}

/*
 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
                                  gfp_t gfp_flags)
{
        u16 bytes_left; /* Bytes left to provide descriptors for */
        u16 bytes_used; /* Bytes accommodated for */
        int ret = 0;

        req->first_desc = NULL; /* Signals that no allocation is done yet */
        bytes_left = req->req.length;
        bytes_used = 0;
        while (bytes_left > 0) {
                dma_addr_t start = req->req.dma + bytes_used;
                u16 size = min(bytes_left, ep->bytes_per_buffer);

                if (size < ep->bytes_per_buffer) {
                        /* Prepare using bounce buffer */
                        req->evenlen = req->req.length - bytes_left;
                        req->oddlen = size;
                }

                ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
                if (ret)
                        goto alloc_err;

                bytes_left -= size;
                bytes_used += size;
        }

        req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

        return 0;

alloc_err:
        gr_free_dma_desc_chain(ep->dev, req);

        return ret;
}
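
/*
 * Worked example of the OUT list setup above (numbers are illustrative): a
 * 1000 byte request on an endpoint with bytes_per_buffer == 512 gets two
 * descriptors, 512 + 488 bytes. The 488 byte tail is smaller than
 * bytes_per_buffer, so req->evenlen = 512 and req->oddlen = 488;
 * gr_start_dma() then points the last descriptor at the bounce buffer and
 * gr_finish_request() copies the tail back into the request buffer.
 */
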
/*
 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware splits
 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
 * is always set to a multiple of the maximum payload (restricted to the valid
 * number of maximum payloads during high bandwidth isochronous or interrupt
 * transfers).
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one indicating that the entire request has been pushed
 * to hardware.
 */
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
                                 gfp_t gfp_flags)
{
        u16 bytes_left; /* Bytes left in req to provide descriptors for */
        u16 bytes_used; /* Bytes in req accommodated for */
        int ret = 0;

        req->first_desc = NULL; /* Signals that no allocation is done yet */
        bytes_left = req->req.length;
        bytes_used = 0;
        do { /* Allow for zero length packets */
                dma_addr_t start = req->req.dma + bytes_used;
                u16 size = min(bytes_left, ep->bytes_per_buffer);

                ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
                if (ret)
                        goto alloc_err;

                bytes_left -= size;
                bytes_used += size;
        } while (bytes_left > 0);

        /*
         * Send an extra zero length packet to indicate that no more data is
         * available when req->req.zero is set and the data length is an even
         * multiple of ep->ep.maxpacket.
         */
        if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
                ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
                if (ret)
                        goto alloc_err;
        }

        /*
         * For IN packets we only want to know when the last packet has been
         * transmitted (not just put into internal buffers).
         */
        req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

        return 0;

alloc_err:
        gr_free_dma_desc_chain(ep->dev, req);

        return ret;
}
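
/*
 * Example of the ZLP rule above: an IN request of exactly 2 * maxpacket bytes
 * with req->req.zero set gets a trailing zero length descriptor, so the host
 * sees a short (empty) packet that marks the end of the transfer.
 */
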
/* Must be called with dev->lock held */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
        struct gr_udc *dev = ep->dev;
        int ret;

        if (unlikely(!ep->ep.desc && ep->num != 0)) {
                dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
                return -EINVAL;
        }

        if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
                dev_err(dev->dev,
                        "Invalid request for %s: buf=%p list_empty=%d\n",
                        ep->ep.name, req->req.buf, list_empty(&req->queue));
                return -EINVAL;
        }

        if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
                dev_err(dev->dev, "-ESHUTDOWN");
                return -ESHUTDOWN;
        }

        /* Can't touch registers when suspended */
        if (dev->ep0state == GR_EP0_SUSPEND) {
                dev_err(dev->dev, "-EBUSY");
                return -EBUSY;
        }

        /* Set up DMA mapping in case the caller didn't */
        ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
        if (ret) {
                dev_err(dev->dev, "usb_gadget_map_request");
                return ret;
        }

        if (ep->is_in)
                ret = gr_setup_in_desc_list(ep, req, gfp_flags);
        else
                ret = gr_setup_out_desc_list(ep, req, gfp_flags);
        if (ret)
                return ret;

        req->req.status = -EINPROGRESS;
        req->req.actual = 0;
        list_add_tail(&req->queue, &ep->queue);

        /* Start DMA if not started, otherwise interrupt handler handles it */
        if (!ep->dma_start && likely(!ep->stopped))
                gr_start_dma(ep);

        return 0;
}

/*
 * Queue a request from within the driver.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
                               gfp_t gfp_flags)
{
        if (ep->is_in)
                gr_dbgprint_request("RESP", ep, req);

        return gr_queue(ep, req, gfp_flags);
}

/* ---------------------------------------------------------------------- */
/* General helper functions */

/*
 * Dequeue ALL requests.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep_nuke(struct gr_ep *ep)
{
        struct gr_request *req;

        ep->stopped = 1;
        ep->dma_start = 0;
        gr_abort_dma(ep);

        while (!list_empty(&ep->queue)) {
                req = list_first_entry(&ep->queue, struct gr_request, queue);
                gr_finish_request(ep, req, -ESHUTDOWN);
        }
}

/*
 * Reset the hardware state of this endpoint.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep_reset(struct gr_ep *ep)
{
        gr_write32(&ep->regs->epctrl, 0);
        gr_write32(&ep->regs->dmactrl, 0);

        ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
        ep->ep.desc = NULL;
        ep->stopped = 1;
        ep->dma_start = 0;
}

/*
 * Generate STALL on ep0in/out.
 *
 * Must be called with dev->lock held.
 */
static void gr_control_stall(struct gr_udc *dev)
{
        u32 epctrl;

        epctrl = gr_read32(&dev->epo[0].regs->epctrl);
        gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
        epctrl = gr_read32(&dev->epi[0].regs->epctrl);
        gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);

        dev->ep0state = GR_EP0_STALL;
}

/*
 * Halts, halts and wedges, or clears halt for an endpoint.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
{
        u32 epctrl;
        int retval = 0;

        if (ep->num && !ep->ep.desc)
                return -EINVAL;

        if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
                return -EOPNOTSUPP;

        /* Never actually halt ep0, and therefore never clear halt for ep0 */
        if (!ep->num) {
                if (halt && !fromhost) {
                        /* ep0 halt from gadget - generate protocol stall */
                        gr_control_stall(ep->dev);
                        dev_dbg(ep->dev->dev, "EP: stall ep0\n");
                        return 0;
                }
                return -EINVAL;
        }

        dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
                (halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);

        epctrl = gr_read32(&ep->regs->epctrl);
        if (halt) {
                /* Set HALT */
                gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
                ep->stopped = 1;
                if (wedge)
                        ep->wedged = 1;
        } else {
                gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
                ep->stopped = 0;
                ep->wedged = 0;

                /* Things might have been queued up in the meantime */
                if (!ep->dma_start)
                        gr_start_dma(ep);
        }

        return retval;
}

/* Must be called with dev->lock held */
static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
{
        if (dev->ep0state != value)
                dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
                         gr_ep0state_string(value));
        dev->ep0state = value;
}

/*
 * Should only be called when endpoints can not generate interrupts.
 *
 * Must be called with dev->lock held.
 */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
        gr_write32(&dev->regs->control, 0);
        wmb(); /* Make sure that we do not deny one of our interrupts */
        dev->irq_enabled = 0;
}

/*
 * Stop all device activity and disable data line pullup.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_stop_activity(struct gr_udc *dev)
{
        struct gr_ep *ep;

        list_for_each_entry(ep, &dev->ep_list, ep_list)
                gr_ep_nuke(ep);

        gr_disable_interrupts_and_pullup(dev);

        gr_set_ep0state(dev, GR_EP0_DISCONNECT);
        usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
}

/* ---------------------------------------------------------------------- */
/* ep0 setup packet handling */
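
/*
 * Completion callback used for TEST_MODE responses: once the ep0in status
 * stage has gone out, program the requested test mode into the control
 * register.
 */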
static void gr_ep0_testmode_complete(struct usb_ep *_ep,
                                     struct usb_request *_req)
{
        struct gr_ep *ep;
        struct gr_udc *dev;
        u32 control;

        ep = container_of(_ep, struct gr_ep, ep);
        dev = ep->dev;

        spin_lock(&dev->lock);

        control = gr_read32(&dev->regs->control);
        control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
        gr_write32(&dev->regs->control, control);

        spin_unlock(&dev->lock);
}

static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
{
        /* Nothing needs to be done here */
}

/*
 * Queue a response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
                          void (*complete)(struct usb_ep *ep,
                                           struct usb_request *req))
{
        u8 *reqbuf = dev->ep0reqi->req.buf;
        int status;
        int i;

        for (i = 0; i < length; i++)
                reqbuf[i] = buf[i];
        dev->ep0reqi->req.length = length;
        dev->ep0reqi->req.complete = complete;

        status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
        if (status < 0)
                dev_err(dev->dev,
                        "Could not queue ep0in setup response: %d\n", status);

        return status;
}

/*
 * Queue a 2 byte response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
{
        __le16 le_response = cpu_to_le16(response);

        return gr_ep0_respond(dev, (u8 *)&le_response, 2,
                              gr_ep0_dummy_complete);
}

/*
 * Queue a ZLP response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_empty(struct gr_udc *dev)
{
        return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
}

/*
 * This is run when a SET_ADDRESS request is received. First writes the new
 * address to the control register, which is updated internally when the next
 * IN packet is ACKED.
 *
 * Must be called with dev->lock held.
 */
static void gr_set_address(struct gr_udc *dev, u8 address)
{
        u32 control;

        control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
        control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
        control |= GR_CONTROL_SU;
        gr_write32(&dev->regs->control, control);
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
                             u16 value, u16 index)
{
        u16 response;
        u8 test;

        switch (request) {
        case USB_REQ_SET_ADDRESS:
                dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
                gr_set_address(dev, value & 0xff);
                if (value)
                        usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
                else
                        usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
                return gr_ep0_respond_empty(dev);

        case USB_REQ_GET_STATUS:
                /* Self powered | remote wakeup */
                response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
                return gr_ep0_respond_u16(dev, response);

        case USB_REQ_SET_FEATURE:
                switch (value) {
                case USB_DEVICE_REMOTE_WAKEUP:
                        /* Allow remote wakeup */
                        dev->remote_wakeup = 1;
                        return gr_ep0_respond_empty(dev);

                case USB_DEVICE_TEST_MODE:
                        /* The hardware does not support TEST_FORCE_EN */
                        test = index >> 8;
                        if (test >= TEST_J && test <= TEST_PACKET) {
                                dev->test_mode = test;
                                return gr_ep0_respond(dev, NULL, 0,
                                                      gr_ep0_testmode_complete);
                        }
                }
                break;

        case USB_REQ_CLEAR_FEATURE:
                switch (value) {
                case USB_DEVICE_REMOTE_WAKEUP:
                        /* Disallow remote wakeup */
                        dev->remote_wakeup = 0;
                        return gr_ep0_respond_empty(dev);
                }
                break;
        }

        return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
                                u16 value, u16 index)
{
        if (dev->gadget.state != USB_STATE_CONFIGURED)
                return -1;

        /*
         * Should return STALL for invalid interfaces, but the UDC driver does
         * not know anything about that. However, many gadget drivers do not
         * handle GET_STATUS so we need to take care of that.
         */
        switch (request) {
        case USB_REQ_GET_STATUS:
                return gr_ep0_respond_u16(dev, 0x0000);

        case USB_REQ_SET_FEATURE:
        case USB_REQ_CLEAR_FEATURE:
                /*
                 * No possible valid standard requests. Still let gadget
                 * drivers have a go at it.
                 */
                break;
        }

        return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
                               u16 value, u16 index)
{
        struct gr_ep *ep;
        int status;
        int halted;
        u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
        u8 is_in = index & USB_ENDPOINT_DIR_MASK;

        if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
                return -1;

        if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
                return -1;

        ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

        switch (request) {
        case USB_REQ_GET_STATUS:
                halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
                return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);

        case USB_REQ_SET_FEATURE:
                switch (value) {
                case USB_ENDPOINT_HALT:
                        status = gr_ep_halt_wedge(ep, 1, 0, 1);
                        if (status >= 0)
                                status = gr_ep0_respond_empty(dev);
                        return status;
                }
                break;

        case USB_REQ_CLEAR_FEATURE:
                switch (value) {
                case USB_ENDPOINT_HALT:
                        if (ep->wedged)
                                return -1;
                        status = gr_ep_halt_wedge(ep, 0, 0, 1);
                        if (status >= 0)
                                status = gr_ep0_respond_empty(dev);
                        return status;
                }
                break;
        }

        return 1; /* Delegate the rest */
}

/* Must be called with dev->lock held */
static void gr_ep0out_requeue(struct gr_udc *dev)
{
        int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);

        if (ret)
                dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
                        ret);
}

/*
 * The main function dealing with setup requests on ep0.
 *
 * Must be called with dev->lock held and irqs disabled
 */
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
        __releases(&dev->lock)
        __acquires(&dev->lock)
{
        union {
                struct usb_ctrlrequest ctrl;
                u8 raw[8];
                u32 word[2];
        } u;
        u8 type;
        u8 request;
        u16 value;
        u16 index;
        u16 length;
        int i;
        int status;

        /* Restore from ep0 halt */
        if (dev->ep0state == GR_EP0_STALL) {
                gr_set_ep0state(dev, GR_EP0_SETUP);
                if (!req->req.actual)
                        goto out;
        }

        if (dev->ep0state == GR_EP0_ISTATUS) {
                gr_set_ep0state(dev, GR_EP0_SETUP);
                if (req->req.actual > 0)
                        dev_dbg(dev->dev,
                                "Unexpected setup packet at state %s\n",
                                gr_ep0state_string(GR_EP0_ISTATUS));
                else
                        goto out; /* Got expected ZLP */
        } else if (dev->ep0state != GR_EP0_SETUP) {
                dev_info(dev->dev,
                         "Unexpected ep0out request at state %s - stalling\n",
                         gr_ep0state_string(dev->ep0state));
                gr_control_stall(dev);
                gr_set_ep0state(dev, GR_EP0_SETUP);
                goto out;
        } else if (!req->req.actual) {
                dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
                        gr_ep0state_string(dev->ep0state));
                goto out;
        }

        /* Handle SETUP packet */
        for (i = 0; i < req->req.actual; i++)
                u.raw[i] = ((u8 *)req->req.buf)[i];

        type = u.ctrl.bRequestType;
        request = u.ctrl.bRequest;
        value = le16_to_cpu(u.ctrl.wValue);
        index = le16_to_cpu(u.ctrl.wIndex);
        length = le16_to_cpu(u.ctrl.wLength);

        gr_dbgprint_devreq(dev, type, request, value, index, length);

        /* Check for data stage */
        if (length) {
                if (type & USB_DIR_IN)
                        gr_set_ep0state(dev, GR_EP0_IDATA);
                else
                        gr_set_ep0state(dev, GR_EP0_ODATA);
        }

        status = 1; /* Positive status flags delegation */
        if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
                switch (type & USB_RECIP_MASK) {
                case USB_RECIP_DEVICE:
                        status = gr_device_request(dev, type, request,
                                                   value, index);
                        break;
                case USB_RECIP_ENDPOINT:
                        status = gr_endpoint_request(dev, type, request,
                                                     value, index);
                        break;
                case USB_RECIP_INTERFACE:
                        status = gr_interface_request(dev, type, request,
                                                      value, index);
                        break;
                }
        }

        if (status > 0) {
                spin_unlock(&dev->lock);

                dev_vdbg(dev->dev, "DELEGATE\n");
                status = dev->driver->setup(&dev->gadget, &u.ctrl);

                spin_lock(&dev->lock);
        }

        /* Generate STALL on both ep0out and ep0in if requested */
        if (unlikely(status < 0)) {
                dev_vdbg(dev->dev, "STALL\n");
                gr_control_stall(dev);
        }

        if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
            request == USB_REQ_SET_CONFIGURATION) {
                if (!value) {
                        dev_dbg(dev->dev, "STATUS: deconfigured\n");
                        usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
                } else if (status >= 0) {
                        /* Not configured unless gadget OKs it */
                        dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
                        usb_gadget_set_state(&dev->gadget,
                                             USB_STATE_CONFIGURED);
                }
        }

        /* Get ready for next stage */
        if (dev->ep0state == GR_EP0_ODATA)
                gr_set_ep0state(dev, GR_EP0_OSTATUS);
        else if (dev->ep0state == GR_EP0_IDATA)
                gr_set_ep0state(dev, GR_EP0_ISTATUS);
        else
                gr_set_ep0state(dev, GR_EP0_SETUP);

out:
        gr_ep0out_requeue(dev);
}
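
/*
 * Summary of the ep0 state flow implemented above: SETUP -> IDATA or ODATA
 * when a data stage is expected, then ISTATUS or OSTATUS for the status
 * stage, and back to SETUP. Errors stall both ep0 directions (GR_EP0_STALL),
 * and SUSPEND is entered from gr_handle_state_changes().
 */
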
/* ---------------------------------------------------------------------- */
/* VBUS and USB reset handling */

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_connected(struct gr_udc *dev, u32 status)
{
        u32 control;

        dev->gadget.speed = GR_SPEED(status);
        usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);

        /* Turn on full interrupts and pullup */
        control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
                   GR_CONTROL_SP | GR_CONTROL_EP);
        gr_write32(&dev->regs->control, control);
}

/* Must be called with dev->lock held */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
        u32 status;

        dev->irq_enabled = 1;
        wmb(); /* Make sure we do not ignore an interrupt */
        gr_write32(&dev->regs->control, GR_CONTROL_VI);

        /* Take care of the case we are already plugged in at this point */
        status = gr_read32(&dev->regs->status);
        if (status & GR_STATUS_VB)
                gr_vbus_connected(dev, status);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
        gr_stop_activity(dev);

        /* Report disconnect */
        if (dev->driver && dev->driver->disconnect) {
                spin_unlock(&dev->lock);

                dev->driver->disconnect(&dev->gadget);

                spin_lock(&dev->lock);
        }

        gr_enable_vbus_detect(dev);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
        gr_set_address(dev, 0);
        gr_set_ep0state(dev, GR_EP0_SETUP);
        usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
        dev->gadget.speed = GR_SPEED(status);

        gr_ep_nuke(&dev->epo[0]);
        gr_ep_nuke(&dev->epi[0]);
        dev->epo[0].stopped = 0;
        dev->epi[0].stopped = 0;
        gr_ep0out_requeue(dev);
}

/* ---------------------------------------------------------------------- */
/* Irq handling */

/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
        struct gr_request *req;

        req = list_first_entry(&ep->queue, struct gr_request, queue);
        if (!req->last_desc)
                return 0;

        if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
                return 0; /* Not put in hardware buffers yet */

        if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
                return 0; /* Not transmitted yet, still in hardware buffers */

        /* Write complete */
        gr_dma_advance(ep, 0);

        return 1;
}

/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
        u32 ep_dmactrl;
        u32 ctrl;
        u16 len;
        struct gr_request *req;
        struct gr_udc *dev = ep->dev;

        req = list_first_entry(&ep->queue, struct gr_request, queue);
        if (!req->curr_desc)
                return 0;

        ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
        if (ctrl & GR_DESC_OUT_CTRL_EN)
                return 0; /* Not received yet */

        /* Read complete */
        len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
        req->req.actual += len;
        if (ctrl & GR_DESC_OUT_CTRL_SE)
                req->setup = 1;

        if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
                /* Short packet or >= expected size - we are done */

                if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
                        /*
                         * Send a status stage ZLP to ack the DATA stage in the
                         * OUT direction. This needs to be done before
                         * gr_dma_advance as that can lead to a call to
                         * ep0_setup that can change dev->ep0state.
                         */
                        gr_ep0_respond_empty(dev);
                        gr_set_ep0state(dev, GR_EP0_SETUP);
                }

                gr_dma_advance(ep, 0);
        } else {
                /* Not done yet. Enable the next descriptor to receive more. */
                req->curr_desc = req->curr_desc->next_desc;
                req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

                ep_dmactrl = gr_read32(&ep->regs->dmactrl);
                gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
        }

        return 1;
}

/*
 * Handle state changes. Returns whether something was handled.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
        u32 status = gr_read32(&dev->regs->status);
        int handled = 0;
        int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
                         dev->gadget.state == USB_STATE_ATTACHED);

        /* VBUS valid detected */
        if (!powstate && (status & GR_STATUS_VB)) {
                dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
                gr_vbus_connected(dev, status);
                handled = 1;
        }

        /* Disconnect */
        if (powstate && !(status & GR_STATUS_VB)) {
                dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
                gr_vbus_disconnected(dev);
                handled = 1;
        }

        /* USB reset detected */
        if (status & GR_STATUS_UR) {
                dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
                        GR_SPEED_STR(status));
                gr_write32(&dev->regs->status, GR_STATUS_UR);
                gr_udc_usbreset(dev, status);
                handled = 1;
        }

        /* Speed change */
        if (dev->gadget.speed != GR_SPEED(status)) {
                dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
                        GR_SPEED_STR(status));
                dev->gadget.speed = GR_SPEED(status);
                handled = 1;
        }

        /* Going into suspend */
        if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
                dev_dbg(dev->dev, "STATUS: USB suspend\n");
                gr_set_ep0state(dev, GR_EP0_SUSPEND);
                dev->suspended_from = dev->gadget.state;
                usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

                if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
                    dev->driver && dev->driver->suspend) {
                        spin_unlock(&dev->lock);

                        dev->driver->suspend(&dev->gadget);

                        spin_lock(&dev->lock);
                }
                handled = 1;
        }

        /* Coming out of suspend */
        if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
                dev_dbg(dev->dev, "STATUS: USB resume\n");
                if (dev->suspended_from == USB_STATE_POWERED)
                        gr_set_ep0state(dev, GR_EP0_DISCONNECT);
                else
                        gr_set_ep0state(dev, GR_EP0_SETUP);
                usb_gadget_set_state(&dev->gadget, dev->suspended_from);

                if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
                    dev->driver && dev->driver->resume) {
                        spin_unlock(&dev->lock);

                        dev->driver->resume(&dev->gadget);

                        spin_lock(&dev->lock);
                }
                handled = 1;
        }

        return handled;
}

/* Non-interrupt context irq handler, runs as a threaded irq */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
        struct gr_udc *dev = _dev;
        struct gr_ep *ep;
        int handled = 0;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);

        if (!dev->irq_enabled)
                goto out;

        /*
         * Check IN ep interrupts. We check these before the OUT eps because
         * some gadgets reuse the request that might already be outstanding and
         * needs to be completed (mainly setup requests).
         */
        for (i = 0; i < dev->nepi; i++) {
                ep = &dev->epi[i];
                if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
                        handled = gr_handle_in_ep(ep) || handled;
        }

        /* Check OUT ep interrupts */
        for (i = 0; i < dev->nepo; i++) {
                ep = &dev->epo[i];
                if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
                        handled = gr_handle_out_ep(ep) || handled;
        }

        /* Check status interrupts */
        handled = gr_handle_state_changes(dev) || handled;

        /*
         * Check AMBA DMA errors. Only check if we didn't find anything else to
         * handle because this shouldn't happen if we did everything right.
         */
        if (!handled) {
                list_for_each_entry(ep, &dev->ep_list, ep_list) {
                        if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
                                dev_err(dev->dev,
                                        "AMBA Error occurred for %s\n",
                                        ep->ep.name);
                                handled = 1;
                        }
                }
        }

out:
        spin_unlock_irqrestore(&dev->lock, flags);

        return handled ? IRQ_HANDLED : IRQ_NONE;
}

/* Interrupt context irq handler */
static irqreturn_t gr_irq(int irq, void *_dev)
{
        struct gr_udc *dev = _dev;

        if (!dev->irq_enabled)
                return IRQ_NONE;

        return IRQ_WAKE_THREAD;
}

/* ---------------------------------------------------------------------- */
/* USB ep ops */

/* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_enable(struct usb_ep *_ep,
                        const struct usb_endpoint_descriptor *desc)
{
        struct gr_udc *dev;
        struct gr_ep *ep;
        u8 mode;
        u8 nt;
        u16 max;
        u16 buffer_size = 0;
        u32 epctrl;

        ep = container_of(_ep, struct gr_ep, ep);
        if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;

        dev = ep->dev;

        /* 'ep0' IN and OUT are reserved */
        if (ep == &dev->epo[0] || ep == &dev->epi[0])
                return -EINVAL;

        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        /* Make sure we are clear for enabling */
        epctrl = gr_read32(&ep->regs->epctrl);
        if (epctrl & GR_EPCTRL_EV)
                return -EBUSY;

        /* Check that directions match */
        if (!ep->is_in != !usb_endpoint_dir_in(desc))
                return -EINVAL;

        /* Check ep num */
        if ((!ep->is_in && ep->num >= dev->nepo) ||
            (ep->is_in && ep->num >= dev->nepi))
                return -EINVAL;

        if (usb_endpoint_xfer_control(desc)) {
                mode = 0;
        } else if (usb_endpoint_xfer_isoc(desc)) {
                mode = 1;
        } else if (usb_endpoint_xfer_bulk(desc)) {
                mode = 2;
        } else if (usb_endpoint_xfer_int(desc)) {
                mode = 3;
        } else {
                dev_err(dev->dev, "Unknown transfer type for %s\n",
                        ep->ep.name);
                return -EINVAL;
        }

        /*
         * Bits 10-0 set the max payload. 12-11 set the number of
         * additional transactions.
         */
        max = 0x7ff & usb_endpoint_maxp(desc);
        nt = 0x3 & (usb_endpoint_maxp(desc) >> 11);
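        /*
         * Example (illustrative): a wMaxPacketSize of 0x1400 yields
         * max = 0x400 (1024 bytes) and nt = 2, i.e. three transactions
         * per microframe in high-bandwidth mode.
         */
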
  1284. buffer_size = GR_BUFFER_SIZE(epctrl);
  1285. if (nt && (mode == 0 || mode == 2)) {
  1286. dev_err(dev->dev,
  1287. "%s mode: multiple trans./microframe not valid\n",
  1288. (mode == 2 ? "Bulk" : "Control"));
  1289. return -EINVAL;
  1290. } else if (nt == 0x3) {
  1291. dev_err(dev->dev,
  1292. "Invalid value 0x3 for additional trans./microframe\n");
  1293. return -EINVAL;
  1294. } else if ((nt + 1) * max > buffer_size) {
  1295. dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
  1296. buffer_size, (nt + 1), max);
  1297. return -EINVAL;
  1298. } else if (max == 0) {
  1299. dev_err(dev->dev, "Max payload cannot be set to 0\n");
  1300. return -EINVAL;
  1301. } else if (max > ep->ep.maxpacket_limit) {
  1302. dev_err(dev->dev, "Requested max payload %d > limit %d\n",
  1303. max, ep->ep.maxpacket_limit);
  1304. return -EINVAL;
  1305. }
	spin_lock(&ep->dev->lock);

	if (!ep->stopped) {
		spin_unlock(&ep->dev->lock);
		return -EBUSY;
	}

	ep->stopped = 0;
	ep->wedged = 0;
	ep->ep.desc = desc;
	ep->ep.maxpacket = max;
	ep->dma_start = 0;

	if (nt) {
		/*
		 * Maximum possible size of all payloads in one microframe
		 * regardless of direction when using high-bandwidth mode.
		 */
		ep->bytes_per_buffer = (nt + 1) * max;
	} else if (ep->is_in) {
		/*
		 * The biggest multiple of maximum packet size that fits into
		 * the buffer. The hardware will split up into many packets in
		 * the IN direction.
		 */
		ep->bytes_per_buffer = (buffer_size / max) * max;
	} else {
		/*
		 * Only single packets will be placed in the buffers in the
		 * OUT direction.
		 */
		ep->bytes_per_buffer = max;
	}
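
	/*
	 * Assemble and write the endpoint control word. The dmactrl flags
	 * match the defaults written for ep0 in gr_udc_init().
	 */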
	epctrl = (max << GR_EPCTRL_MAXPL_POS)
		| (nt << GR_EPCTRL_NT_POS)
		| (mode << GR_EPCTRL_TT_POS)
		| GR_EPCTRL_EV;
	if (ep->is_in)
		epctrl |= GR_EPCTRL_PI;
	gr_write32(&ep->regs->epctrl, epctrl);

	gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);

	spin_unlock(&ep->dev->lock);

	dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
		ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
	return 0;
}
/* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_disable(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !ep->ep.desc)
		return -ENODEV;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);

	spin_lock_irqsave(&dev->lock, flags);

	gr_ep_nuke(ep);
	gr_ep_reset(ep);
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/*
 * Frees a request, but not any DMA buffers associated with it
 * (gr_finish_request should already have taken care of that).
 */
static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;

	if (!_ep || !_req)
		return;
	req = container_of(_req, struct gr_request, req);

	/* Leads to memory leak */
	WARN(!list_empty(&req->queue),
	     "request not dequeued properly before freeing\n");

	kfree(req);
}
/* Queue a request from the gadget */
static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct gr_udc *dev;
	int ret;

	if (unlikely(!_ep || !_req))
		return -EINVAL;

	ep = container_of(_ep, struct gr_ep, ep);
	req = container_of(_req, struct gr_request, req);
	dev = ep->dev;

	spin_lock(&ep->dev->lock);

	/*
	 * The ep0 pointer in the gadget struct is used both for ep0in and
	 * ep0out. In a data stage in the out direction ep0out needs to be used
	 * instead of the default ep0in. Completion functions might use
	 * driver_data, so that needs to be copied as well.
	 */
	if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
		ep = &dev->epo[0];
		ep->ep.driver_data = dev->epi[0].ep.driver_data;
	}

	if (ep->is_in)
		gr_dbgprint_request("EXTERN", ep, req);
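
	/* The lock is held, so queue atomically regardless of gfp_flags */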
	ret = gr_queue(ep, req, GFP_ATOMIC);

	spin_unlock(&ep->dev->lock);

	return ret;
}
/* Dequeue JUST ONE request */
static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;
	struct gr_ep *ep;
	struct gr_udc *dev;
	int ret = 0;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* We can't touch (DMA) registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	spin_lock_irqsave(&dev->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
		/* This request is currently being processed */
		gr_abort_dma(ep);
		if (ep->stopped)
			gr_finish_request(ep, req, -ECONNRESET);
		else
			gr_dma_advance(ep, -ECONNRESET);
	} else if (!list_empty(&req->queue)) {
		/* Not being processed - gr_finish_request dequeues it */
		gr_finish_request(ep, req, -ECONNRESET);
	} else {
		ret = -EOPNOTSUPP;
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}
/* Helper for gr_set_halt and gr_set_wedge */
static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	int ret;
	struct gr_ep *ep;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	spin_lock(&ep->dev->lock);

	/* Halting an IN endpoint should fail if queue is not empty */
	if (halt && ep->is_in && !list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = gr_ep_halt_wedge(ep, halt, wedge, 0);

out:
	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Halt endpoint */
static int gr_set_halt(struct usb_ep *_ep, int halt)
{
	return gr_set_halt_wedge(_ep, halt, 0);
}

/* Halt and wedge endpoint */
static int gr_set_wedge(struct usb_ep *_ep)
{
	return gr_set_halt_wedge(_ep, 1, 1);
}
/*
 * Return the total number of bytes currently stored in the internal buffers of
 * the endpoint.
 */
static int gr_fifo_status(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epstat;
	u32 bytes = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);
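
	/* Sum the byte counts of the endpoint's two hardware buffers */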
	epstat = gr_read32(&ep->regs->epstat);

	if (epstat & GR_EPSTAT_B0)
		bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
	if (epstat & GR_EPSTAT_B1)
		bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;

	return bytes;
}
/* Empty data from internal buffers of an endpoint. */
static void gr_fifo_flush(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epctrl;

	if (!_ep)
		return;
	ep = container_of(_ep, struct gr_ep, ep);
	dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);

	spin_lock(&ep->dev->lock);
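
	/* Setting the CB bit (presumably "clear buffers") drops any buffered data */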
	epctrl = gr_read32(&ep->regs->epctrl);
	epctrl |= GR_EPCTRL_CB;
	gr_write32(&ep->regs->epctrl, epctrl);

	spin_unlock(&ep->dev->lock);
}
static struct usb_ep_ops gr_ep_ops = {
	.enable		= gr_ep_enable,
	.disable	= gr_ep_disable,

	.alloc_request	= gr_alloc_request,
	.free_request	= gr_free_request,

	.queue		= gr_queue_ext,
	.dequeue	= gr_dequeue,

	.set_halt	= gr_set_halt,
	.set_wedge	= gr_set_wedge,
	.fifo_status	= gr_fifo_status,
	.fifo_flush	= gr_fifo_flush,
};
/* ---------------------------------------------------------------------- */
/* USB Gadget ops */

static int gr_get_frame(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);
	return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
}
static int gr_wakeup(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	/* Remote wakeup feature not enabled by host */
	if (!dev->remote_wakeup)
		return -EINVAL;

	spin_lock(&dev->lock);
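
	/* The RW bit requests remote wakeup signalling from the core */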
	gr_write32(&dev->regs->control,
		   gr_read32(&dev->regs->control) | GR_CONTROL_RW);

	spin_unlock(&dev->lock);

	return 0;
}
static int gr_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct gr_udc *dev;
	u32 control;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
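	/* The EP bit controls the soft-connect pullup */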
	if (is_on)
		control |= GR_CONTROL_EP;
	else
		control &= ~GR_CONTROL_EP;
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);

	return 0;
}
static int gr_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);

	spin_lock(&dev->lock);

	/* Hook up the driver */
	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Get ready for host detection */
	gr_enable_vbus_detect(dev);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_stop(struct usb_gadget *gadget)
{
	struct gr_udc *dev = to_gr_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->driver = NULL;
	gr_stop_activity(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
static const struct usb_gadget_ops gr_ops = {
	.get_frame	= gr_get_frame,
	.wakeup		= gr_wakeup,
	.pullup		= gr_pullup,
	.udc_start	= gr_udc_start,
	.udc_stop	= gr_udc_stop,
	/* Other operations not supported */
};
/* ---------------------------------------------------------------------- */
/* Module probe, removal and of-matching */

static const char * const onames[] = {
	"ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
	"ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
	"ep12out", "ep13out", "ep14out", "ep15out"
};

static const char * const inames[] = {
	"ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
	"ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
	"ep12in", "ep13in", "ep14in", "ep15in"
};
/* Must be called with dev->lock held */
static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct usb_request *_req;
	void *buf;

	if (is_in) {
		ep = &dev->epi[num];
		ep->ep.name = inames[num];
		ep->regs = &dev->regs->epi[num];
	} else {
		ep = &dev->epo[num];
		ep->ep.name = onames[num];
		ep->regs = &dev->regs->epo[num];
	}

	gr_ep_reset(ep);
	ep->num = num;
	ep->is_in = is_in;
	ep->dev = dev;
	ep->ep.ops = &gr_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	if (num == 0) {
		_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
		buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
		if (!_req || !buf) {
			/* possible _req freed by gr_probe via gr_remove */
			return -ENOMEM;
		}

		req = container_of(_req, struct gr_request, req);
		req->req.buf = buf;
		req->req.length = MAX_CTRL_PL_SIZE;

		if (is_in)
			dev->ep0reqi = req; /* Complete gets set as used */
		else
			dev->ep0reqo = req; /* Completion treated separately */

		usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
		ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;
	} else {
		usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
	}
	list_add_tail(&ep->ep_list, &dev->ep_list);
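
	/*
	 * Per-endpoint coherent bounce buffer, one max packet in size;
	 * presumably used by the OUT DMA path for data that would not fit in
	 * a request buffer.
	 */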
	ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
					 &ep->tailbuf_paddr, GFP_ATOMIC);
	if (!ep->tailbuf)
		return -ENOMEM;

	return 0;
}
/* Must be called with dev->lock held */
static int gr_udc_init(struct gr_udc *dev)
{
	struct device_node *np = dev->dev->of_node;
	u32 epctrl_val;
	u32 dmactrl_val;
	int i;
	int ret = 0;
	u32 bufsize;

	gr_set_address(dev, 0);

	INIT_LIST_HEAD(&dev->gadget.ep_list);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->gadget.ep0 = &dev->epi[0].ep;

	INIT_LIST_HEAD(&dev->ep_list);
	gr_set_ep0state(dev, GR_EP0_DISCONNECT);
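
	/*
	 * Buffer sizes can be configured per endpoint; read them from the
	 * optional "epobufsizes"/"epibufsizes" DT properties and fall back to
	 * 1024 bytes where no entry is given.
	 */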
	for (i = 0; i < dev->nepo; i++) {
		if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 0, bufsize);
		if (ret)
			return ret;
	}

	for (i = 0; i < dev->nepi; i++) {
		if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 1, bufsize);
		if (ret)
			return ret;
	}

	/* Must be disabled by default */
	dev->remote_wakeup = 0;

	/* Enable ep0out and ep0in */
	epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
	dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
	gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
	gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
	gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);

	return 0;
}
static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
{
	struct gr_ep *ep;

	if (is_in)
		ep = &dev->epi[num];
	else
		ep = &dev->epo[num];

	if (ep->tailbuf)
		dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
				  ep->tailbuf, ep->tailbuf_paddr);
}
static int gr_remove(struct platform_device *pdev)
{
	struct gr_udc *dev = platform_get_drvdata(pdev);
	int i;

	if (dev->added)
		usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
	if (dev->driver)
		return -EBUSY;

	gr_dfs_delete(dev);
	if (dev->desc_pool)
		dma_pool_destroy(dev->desc_pool);
	platform_set_drvdata(pdev, NULL);

	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
	gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);

	for (i = 0; i < dev->nepo; i++)
		gr_ep_remove(dev, i, 0);
	for (i = 0; i < dev->nepi; i++)
		gr_ep_remove(dev, i, 1);

	return 0;
}
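
/*
 * All irqs are requested as threaded: gr_irq() only does a quick check in hard
 * irq context and defers the actual event handling to gr_irq_handler().
 */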
static int gr_request_irq(struct gr_udc *dev, int irq)
{
	return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
					 IRQF_SHARED, driver_name, dev);
}
static int gr_probe(struct platform_device *pdev)
{
	struct gr_udc *dev;
	struct resource *res;
	struct gr_regs __iomem *regs;
	int retval;
	u32 status;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq <= 0) {
		dev_err(dev->dev, "No irq found\n");
		return -ENODEV;
	}
	/* Some core configurations have separate irqs for IN and OUT events */
	dev->irqi = platform_get_irq(pdev, 1);
	if (dev->irqi > 0) {
		dev->irqo = platform_get_irq(pdev, 2);
		if (dev->irqo <= 0) {
			dev_err(dev->dev, "Found irqi but not irqo\n");
			return -ENODEV;
		}
	} else {
		dev->irqi = 0;
	}
	dev->gadget.name = driver_name;
	dev->gadget.max_speed = USB_SPEED_HIGH;
	dev->gadget.ops = &gr_ops;

	spin_lock_init(&dev->lock);
	dev->regs = regs;

	platform_set_drvdata(pdev, dev);

	/* Determine number of endpoints and data interface mode */
	status = gr_read32(&dev->regs->status);
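
	/* The endpoint counts are encoded as count - 1 in the status register */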
	dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
	dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;

	if (!(status & GR_STATUS_DM)) {
		dev_err(dev->dev, "Slave mode cores are not supported\n");
		return -ENODEV;
	}

	/* --- Effects of the following calls might need explicit cleanup --- */

	/* Create DMA pool for descriptors */
	dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
					 sizeof(struct gr_dma_desc), 4, 0);
	if (!dev->desc_pool) {
		dev_err(dev->dev, "Could not allocate DMA pool");
		return -ENOMEM;
	}

	spin_lock(&dev->lock);

	/* Inside lock so that no gadget can use this udc until probe is done */
	retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
	if (retval) {
		dev_err(dev->dev, "Could not add gadget udc");
		goto out;
	}
	dev->added = 1;

	retval = gr_udc_init(dev);
	if (retval)
		goto out;

	gr_dfs_create(dev);

	/* Clear all interrupt enables that might be left on since last boot */
	gr_disable_interrupts_and_pullup(dev);

	retval = gr_request_irq(dev, dev->irq);
	if (retval) {
		dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
		goto out;
	}

	if (dev->irqi) {
		retval = gr_request_irq(dev, dev->irqi);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqi %d\n",
				dev->irqi);
			goto out;
		}
		retval = gr_request_irq(dev, dev->irqo);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqo %d\n",
				dev->irqo);
			goto out;
		}
	}

	if (dev->irqi)
		dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
			 dev->irq, dev->irqi, dev->irqo);
	else
		dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);

out:
	spin_unlock(&dev->lock);

	if (retval)
		gr_remove(pdev);

	return retval;
}
static const struct of_device_id gr_match[] = {
	{.name = "GAISLER_USBDC"},
	{.name = "01_021"},
	{},
};
MODULE_DEVICE_TABLE(of, gr_match);

static struct platform_driver gr_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = gr_match,
	},
	.probe = gr_probe,
	.remove = gr_remove,
};
module_platform_driver(gr_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");