
/*
 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
 *
 * 2013 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRUSBDC USB Device Controller cores available in the
 * GRLIB VHDL IP core library.
 *
 * Full documentation of the GRUSBDC core can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Contributors:
 * - Andreas Larsson <andreas@gaisler.com>
 * - Marko Isomaki
 */

/*
 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints each
 * individually configurable to any of the four USB transfer types. This driver
 * only supports cores in DMA mode.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <asm/byteorder.h>

#include "gr_udc.h"

#define DRIVER_NAME	"gr_udc"
#define DRIVER_DESC	"Aeroflex Gaisler GRUSBDC USB Peripheral Controller"

static const char driver_name[] = DRIVER_NAME;
static const char driver_desc[] = DRIVER_DESC;

#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))

/* USB speed and corresponding string calculated from status register value */
#define GR_SPEED(status) \
	((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))

/* Size of hardware buffer calculated from epctrl register value */
#define GR_BUFFER_SIZE(epctrl)					      \
	((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
	 GR_EPCTRL_BUFSZ_SCALER)
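
/*
 * For example: assuming GR_EPCTRL_BUFSZ_SCALER is 8 (its value comes from
 * gr_udc.h, which is not part of this excerpt), a BUFSZ field value of 128
 * in epctrl would correspond to a 1024 byte hardware buffer. The buffer size
 * is a synthesis-time parameter of the core, which is why it is read back
 * from the register rather than configured by the driver.
 */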

/* ---------------------------------------------------------------------- */
/* Debug printout functionality */

static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};

static const char *gr_ep0state_string(enum gr_ep0state state)
{
	static const char *const names[] = {
		[GR_EP0_DISCONNECT] = "disconnect",
		[GR_EP0_SETUP] = "setup",
		[GR_EP0_IDATA] = "idata",
		[GR_EP0_ODATA] = "odata",
		[GR_EP0_ISTATUS] = "istatus",
		[GR_EP0_OSTATUS] = "ostatus",
		[GR_EP0_STALL] = "stall",
		[GR_EP0_SUSPEND] = "suspend",
	};

	if (state < 0 || state >= ARRAY_SIZE(names))
		return "UNKNOWN";

	return names[state];
}

#ifdef VERBOSE_DEBUG

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req)
{
	int buflen = ep->is_in ? req->req.length : req->req.actual;
	int rowlen = 32;
	int plen = min(rowlen, buflen);

	dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
		(buflen > plen ? " (truncated)" : ""));
	print_hex_dump_debug(" ", DUMP_PREFIX_NONE,
			     rowlen, 4, req->req.buf, plen, false);
}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length)
{
	dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
		 type, request, value, index, length);
}
#else /* !VERBOSE_DEBUG */

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req) {}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length) {}

#endif /* VERBOSE_DEBUG */

/* ---------------------------------------------------------------------- */
/* Debugfs functionality */

#ifdef CONFIG_USB_GADGET_DEBUG_FS

static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
	u32 epctrl = gr_read32(&ep->regs->epctrl);
	u32 epstat = gr_read32(&ep->regs->epstat);
	int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
	struct gr_request *req;

	seq_printf(seq, "%s:\n", ep->ep.name);
	seq_printf(seq, " mode = %s\n", gr_modestring[mode]);
	seq_printf(seq, " halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
	seq_printf(seq, " disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
	seq_printf(seq, " valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
	seq_printf(seq, " dma_start = %d\n", ep->dma_start);
	seq_printf(seq, " stopped = %d\n", ep->stopped);
	seq_printf(seq, " wedged = %d\n", ep->wedged);
	seq_printf(seq, " callback = %d\n", ep->callback);
	seq_printf(seq, " maxpacket = %d\n", ep->ep.maxpacket);
	seq_printf(seq, " maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
	seq_printf(seq, " bytes_per_buffer = %d\n", ep->bytes_per_buffer);
	if (mode == 1 || mode == 3)
		seq_printf(seq, " nt = %d\n",
			   (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);
	seq_printf(seq, " Buffer 0: %s %s%d\n",
		   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? " " : "selected ",
		   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
	seq_printf(seq, " Buffer 1: %s %s%d\n",
		   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? "selected " : " ",
		   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);
	if (list_empty(&ep->queue)) {
		seq_puts(seq, " Queue: empty\n\n");
		return;
	}

	seq_puts(seq, " Queue:\n");
	list_for_each_entry(req, &ep->queue, queue) {
		struct gr_dma_desc *desc;
		struct gr_dma_desc *next;

		seq_printf(seq, " 0x%p: 0x%p %d %d\n", req,
			   &req->req.buf, req->req.actual, req->req.length);

		next = req->first_desc;
		do {
			desc = next;
			next = desc->next_desc;
			seq_printf(seq, " %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
				   desc == req->curr_desc ? 'c' : ' ',
				   desc, desc->paddr, desc->ctrl, desc->data);
		} while (desc != req->last_desc);
	}
	seq_puts(seq, "\n");
}

static int gr_seq_show(struct seq_file *seq, void *v)
{
	struct gr_udc *dev = seq->private;
	u32 control = gr_read32(&dev->regs->control);
	u32 status = gr_read32(&dev->regs->status);
	struct gr_ep *ep;

	seq_printf(seq, "usb state = %s\n",
		   usb_state_string(dev->gadget.state));
	seq_printf(seq, "address = %d\n",
		   (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
	seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
	seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
	seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
	seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
	seq_printf(seq, "test_mode = %d\n", dev->test_mode);
	seq_puts(seq, "\n");

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_seq_ep_show(seq, ep);

	return 0;
}

static int gr_dfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, gr_seq_show, inode->i_private);
}

static const struct file_operations gr_dfs_fops = {
	.owner		= THIS_MODULE,
	.open		= gr_dfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void gr_dfs_create(struct gr_udc *dev)
{
	const char *name = "gr_udc_state";

	dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
	dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root, dev,
					     &gr_dfs_fops);
}

static void gr_dfs_delete(struct gr_udc *dev)
{
	/* Handles NULL and ERR pointers internally */
	debugfs_remove(dev->dfs_state);
	debugfs_remove(dev->dfs_root);
}

#else /* !CONFIG_USB_GADGET_DEBUG_FS */

static void gr_dfs_create(struct gr_udc *dev) {}
static void gr_dfs_delete(struct gr_udc *dev) {}

#endif /* CONFIG_USB_GADGET_DEBUG_FS */

/* ---------------------------------------------------------------------- */
/* DMA and request handling */

/* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
{
	dma_addr_t paddr;
	struct gr_dma_desc *dma_desc;

	dma_desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr);
	if (!dma_desc) {
		dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
		return NULL;
	}

	dma_desc->paddr = paddr;

	return dma_desc;
}

static inline void gr_free_dma_desc(struct gr_udc *dev,
				    struct gr_dma_desc *desc)
{
	dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}

/* Frees the chain of struct gr_dma_desc for the given request */
static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
{
	struct gr_dma_desc *desc;
	struct gr_dma_desc *next;

	next = req->first_desc;
	if (!next)
		return;

	do {
		desc = next;
		next = desc->next_desc;
		gr_free_dma_desc(dev, desc);
	} while (desc != req->last_desc);

	req->first_desc = NULL;
	req->curr_desc = NULL;
	req->last_desc = NULL;
}

static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);

/*
 * Frees allocated resources and calls the appropriate completion function/setup
 * package handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
			      int status)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	struct gr_udc *dev;

	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
	gr_free_dma_desc_chain(dev, req);

	if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
		req->req.actual = req->req.length;
	} else if (req->oddlen && req->req.actual > req->evenlen) {
		/*
		 * Copy to user buffer in this case where length was not evenly
		 * divisible by ep->ep.maxpacket and the last descriptor was
		 * actually used.
		 */
		char *buftail = ((char *)req->req.buf + req->evenlen);

		memcpy(buftail, ep->tailbuf, req->oddlen);

		if (req->req.actual > req->req.length) {
			/* We got more data than was requested */
			dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
				ep->ep.name);
			gr_dbgprint_request("OVFL", ep, req);
			req->req.status = -EOVERFLOW;
		}
	}

	if (!status) {
		if (ep->is_in)
			gr_dbgprint_request("SENT", ep, req);
		else
			gr_dbgprint_request("RECV", ep, req);
	}

	/* Prevent changes to ep->queue during callback */
	ep->callback = 1;
	if (req == dev->ep0reqo && !status) {
		if (req->setup)
			gr_ep0_setup(dev, req);
		else
			dev_err(dev->dev,
				"Unexpected non setup packet on ep0in\n");
	} else if (req->req.complete) {
		spin_unlock(&dev->lock);

		usb_gadget_giveback_request(&ep->ep, &req->req);

		spin_lock(&dev->lock);
	}
	ep->callback = 0;
}

static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct gr_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

/*
 * Starts DMA for endpoint ep if there are requests in the queue.
 *
 * Must be called with dev->lock held and with !ep->stopped.
 */
static void gr_start_dma(struct gr_ep *ep)
{
	struct gr_request *req;
	u32 dmactrl;

	if (list_empty(&ep->queue)) {
		ep->dma_start = 0;
		return;
	}

	req = list_first_entry(&ep->queue, struct gr_request, queue);

	/* A descriptor should already have been allocated */
	BUG_ON(!req->curr_desc);

	/*
	 * The DMA controller cannot handle smaller OUT buffers than
	 * ep->ep.maxpacket. It could lead to buffer overruns if an
	 * unexpectedly long packet is received. Therefore an internal bounce
	 * buffer gets used when such a request gets enabled.
	 */
	if (!ep->is_in && req->oddlen)
		req->last_desc->data = ep->tailbuf_paddr;

	wmb(); /* Make sure all is settled before handing it over to DMA */

	/* Set the descriptor pointer in the hardware */
	gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

	/* Announce available descriptors */
	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

	ep->dma_start = 1;
}

/*
 * Finishes the first request in the ep's queue and, if available, starts the
 * next request in queue.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static void gr_dma_advance(struct gr_ep *ep, int status)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	gr_finish_request(ep, req, status);
	gr_start_dma(ep); /* Regardless of ep->dma_start */
}

/*
 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
 * transfer to be canceled and clears GR_DMACTRL_DA.
 *
 * Must be called with dev->lock held.
 */
static void gr_abort_dma(struct gr_ep *ep)
{
	u32 dmactrl;

	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
}

/*
 * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
 * chain.
 *
 * Size is not used for OUT endpoints. Hardware cannot be instructed to handle
 * buffers smaller than MAXPL in the OUT direction.
 */
static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
			   dma_addr_t data, unsigned size, gfp_t gfp_flags)
{
	struct gr_dma_desc *desc;

	desc = gr_alloc_dma_desc(ep, gfp_flags);
	if (!desc)
		return -ENOMEM;

	desc->data = data;
	if (ep->is_in)
		desc->ctrl =
			(GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
	else
		desc->ctrl = GR_DESC_OUT_CTRL_IE;

	if (!req->first_desc) {
		req->first_desc = desc;
		req->curr_desc = desc;
	} else {
		req->last_desc->next_desc = desc;
		req->last_desc->next = desc->paddr;
		req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
	}
	req->last_desc = desc;

	return 0;
}

/*
 * Sets up a chain of struct gr_dma_desc entries pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
				  gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left to provide descriptors for */
	u16 bytes_used; /* Bytes accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	while (bytes_left > 0) {
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		if (size < ep->bytes_per_buffer) {
			/* Prepare using bounce buffer */
			req->evenlen = req->req.length - bytes_left;
			req->oddlen = size;
		}

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	}

	req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}
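
/*
 * Worked example for the OUT path above: with bytes_per_buffer == 512 and a
 * 1000 byte request, the loop creates two descriptors covering 512 and 488
 * bytes. The second one is smaller than bytes_per_buffer, so req->evenlen
 * becomes 512 and req->oddlen 488; gr_start_dma() then points the last
 * descriptor at the bounce buffer and gr_finish_request() copies the tail
 * back into the request buffer.
 */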

/*
 * Sets up a chain of struct gr_dma_desc entries pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware splits
 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
 * is always set to a multiple of the maximum payload (restricted to the valid
 * number of maximum payloads during high bandwidth isochronous or interrupt
 * transfers).
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one indicating that the entire request has been pushed
 * to hardware.
 */
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
				 gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left in req to provide descriptors for */
	u16 bytes_used; /* Bytes in req accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	do { /* Allow for zero length packets */
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	} while (bytes_left > 0);

	/*
	 * Send an extra zero length packet to indicate that no more data is
	 * available when req->req.zero is set and the data length is an even
	 * multiple of ep->ep.maxpacket.
	 */
	if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
		ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
		if (ret)
			goto alloc_err;
	}

	/*
	 * For IN packets we only want to know when the last packet has been
	 * transmitted (not just put into internal buffers).
	 */
	req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}
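
/*
 * Worked example for the IN path above: with ep.maxpacket == 512,
 * bytes_per_buffer == 1024 and a 1024 byte request with req.zero set, the
 * loop creates a single 1024 byte descriptor and the ZLP check appends an
 * empty descriptor, since 1024 is a multiple of 512. Only that last
 * descriptor gets GR_DESC_IN_CTRL_PI, so one interrupt signals completion
 * of the whole request.
 */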

/* Must be called with dev->lock held */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
	struct gr_udc *dev = ep->dev;
	int ret;

	if (unlikely(!ep->ep.desc && ep->num != 0)) {
		dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
		return -EINVAL;
	}

	if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
		dev_err(dev->dev,
			"Invalid request for %s: buf=%p list_empty=%d\n",
			ep->ep.name, req->req.buf, list_empty(&req->queue));
		return -EINVAL;
	}

	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		dev_err(dev->dev, "-ESHUTDOWN");
		return -ESHUTDOWN;
	}

	/* Can't touch registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND) {
		dev_err(dev->dev, "-EBUSY");
		return -EBUSY;
	}

	/* Set up DMA mapping in case the caller didn't */
	ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
	if (ret) {
		dev_err(dev->dev, "usb_gadget_map_request");
		return ret;
	}

	if (ep->is_in)
		ret = gr_setup_in_desc_list(ep, req, gfp_flags);
	else
		ret = gr_setup_out_desc_list(ep, req, gfp_flags);
	if (ret)
		return ret;

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	list_add_tail(&req->queue, &ep->queue);

	/* Start DMA if not started, otherwise interrupt handler handles it */
	if (!ep->dma_start && likely(!ep->stopped))
		gr_start_dma(ep);

	return 0;
}

/*
 * Queue a request from within the driver.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
			       gfp_t gfp_flags)
{
	if (ep->is_in)
		gr_dbgprint_request("RESP", ep, req);

	return gr_queue(ep, req, gfp_flags);
}

/* ---------------------------------------------------------------------- */
/* General helper functions */

/*
 * Dequeue ALL requests.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep_nuke(struct gr_ep *ep)
{
	struct gr_request *req;

	ep->stopped = 1;
	ep->dma_start = 0;
	gr_abort_dma(ep);

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct gr_request, queue);
		gr_finish_request(ep, req, -ESHUTDOWN);
	}
}

/*
 * Reset the hardware state of this endpoint.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep_reset(struct gr_ep *ep)
{
	gr_write32(&ep->regs->epctrl, 0);
	gr_write32(&ep->regs->dmactrl, 0);

	ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
	ep->ep.desc = NULL;
	ep->stopped = 1;
	ep->dma_start = 0;
}

/*
 * Generate STALL on ep0in/out.
 *
 * Must be called with dev->lock held.
 */
static void gr_control_stall(struct gr_udc *dev)
{
	u32 epctrl;

	epctrl = gr_read32(&dev->epo[0].regs->epctrl);
	gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
	epctrl = gr_read32(&dev->epi[0].regs->epctrl);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);

	dev->ep0state = GR_EP0_STALL;
}

/*
 * Halts, halts and wedges, or clears halt for an endpoint.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
{
	u32 epctrl;
	int retval = 0;

	if (ep->num && !ep->ep.desc)
		return -EINVAL;

	if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
		return -EOPNOTSUPP;

	/* Never actually halt ep0, and therefore never clear halt for ep0 */
	if (!ep->num) {
		if (halt && !fromhost) {
			/* ep0 halt from gadget - generate protocol stall */
			gr_control_stall(ep->dev);
			dev_dbg(ep->dev->dev, "EP: stall ep0\n");
			return 0;
		}
		return -EINVAL;
	}

	dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
		(halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);

	epctrl = gr_read32(&ep->regs->epctrl);
	if (halt) {
		/* Set HALT */
		gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
		ep->stopped = 1;
		if (wedge)
			ep->wedged = 1;
	} else {
		gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
		ep->stopped = 0;
		ep->wedged = 0;

		/* Things might have been queued up in the meantime */
		if (!ep->dma_start)
			gr_start_dma(ep);
	}

	return retval;
}

/* Must be called with dev->lock held */
static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
{
	if (dev->ep0state != value)
		dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
			 gr_ep0state_string(value));
	dev->ep0state = value;
}

/*
 * Should only be called when endpoints can not generate interrupts.
 *
 * Must be called with dev->lock held.
 */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
	gr_write32(&dev->regs->control, 0);
	wmb(); /* Make sure that we do not deny one of our interrupts */
	dev->irq_enabled = 0;
}

/*
 * Stop all device activity and disable data line pullup.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_stop_activity(struct gr_udc *dev)
{
	struct gr_ep *ep;

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_ep_nuke(ep);

	gr_disable_interrupts_and_pullup(dev);

	gr_set_ep0state(dev, GR_EP0_DISCONNECT);
	usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
}

/* ---------------------------------------------------------------------- */
/* ep0 setup packet handling */

static void gr_ep0_testmode_complete(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	u32 control;

	ep = container_of(_ep, struct gr_ep, ep);
	dev = ep->dev;

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);
}

static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
{
	/* Nothing needs to be done here */
}

/*
 * Queue a response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
			  void (*complete)(struct usb_ep *ep,
					   struct usb_request *req))
{
	u8 *reqbuf = dev->ep0reqi->req.buf;
	int status;
	int i;

	for (i = 0; i < length; i++)
		reqbuf[i] = buf[i];
	dev->ep0reqi->req.length = length;
	dev->ep0reqi->req.complete = complete;

	status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
	if (status < 0)
		dev_err(dev->dev,
			"Could not queue ep0in setup response: %d\n", status);

	return status;
}

/*
 * Queue a 2 byte response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
{
	__le16 le_response = cpu_to_le16(response);

	return gr_ep0_respond(dev, (u8 *)&le_response, 2,
			      gr_ep0_dummy_complete);
}

/*
 * Queue a ZLP response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_empty(struct gr_udc *dev)
{
	return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
}

/*
 * This is run when a SET_ADDRESS request is received. First writes
 * the new address to the control register which is updated internally
 * when the next IN packet is ACKED.
 *
 * Must be called with dev->lock held.
 */
static void gr_set_address(struct gr_udc *dev, u8 address)
{
	u32 control;

	control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
	control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
	control |= GR_CONTROL_SU;
	gr_write32(&dev->regs->control, control);
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
			     u16 value, u16 index)
{
	u16 response;
	u8 test;

	switch (request) {
	case USB_REQ_SET_ADDRESS:
		dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
		gr_set_address(dev, value & 0xff);
		if (value)
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
		return gr_ep0_respond_empty(dev);

	case USB_REQ_GET_STATUS:
		/* Self powered | remote wakeup */
		response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
		return gr_ep0_respond_u16(dev, response);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Allow remote wakeup */
			dev->remote_wakeup = 1;
			return gr_ep0_respond_empty(dev);

		case USB_DEVICE_TEST_MODE:
			/* The hardware does not support TEST_FORCE_EN */
			test = index >> 8;
			if (test >= TEST_J && test <= TEST_PACKET) {
				dev->test_mode = test;
				return gr_ep0_respond(dev, NULL, 0,
						      gr_ep0_testmode_complete);
			}
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Disallow remote wakeup */
			dev->remote_wakeup = 0;
			return gr_ep0_respond_empty(dev);
		}
		break;
	}

	return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
				u16 value, u16 index)
{
	if (dev->gadget.state != USB_STATE_CONFIGURED)
		return -1;

	/*
	 * Should return STALL for invalid interfaces, but udc driver does not
	 * know anything about that. However, many gadget drivers do not handle
	 * GET_STATUS so we need to take care of that.
	 */
	switch (request) {
	case USB_REQ_GET_STATUS:
		return gr_ep0_respond_u16(dev, 0x0000);

	case USB_REQ_SET_FEATURE:
	case USB_REQ_CLEAR_FEATURE:
		/*
		 * No possible valid standard requests. Still let gadget drivers
		 * have a go at it.
		 */
		break;
	}

	return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index)
{
	struct gr_ep *ep;
	int status;
	int halted;
	u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
	u8 is_in = index & USB_ENDPOINT_DIR_MASK;

	if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
		return -1;

	if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
		return -1;

	ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

	switch (request) {
	case USB_REQ_GET_STATUS:
		halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
		return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			status = gr_ep_halt_wedge(ep, 1, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			if (ep->wedged)
				return -1;
			status = gr_ep_halt_wedge(ep, 0, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;
	}

	return 1; /* Delegate the rest */
}

/* Must be called with dev->lock held */
static void gr_ep0out_requeue(struct gr_udc *dev)
{
	int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);

	if (ret)
		dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
			ret);
}

/*
 * The main function dealing with setup requests on ep0.
 *
 * Must be called with dev->lock held and irqs disabled
 */
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	union {
		struct usb_ctrlrequest ctrl;
		u8 raw[8];
		u32 word[2];
	} u;
	u8 type;
	u8 request;
	u16 value;
	u16 index;
	u16 length;
	int i;
	int status;

	/* Restore from ep0 halt */
	if (dev->ep0state == GR_EP0_STALL) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (!req->req.actual)
			goto out;
	}

	if (dev->ep0state == GR_EP0_ISTATUS) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (req->req.actual > 0)
			dev_dbg(dev->dev,
				"Unexpected setup packet at state %s\n",
				gr_ep0state_string(GR_EP0_ISTATUS));
		else
			goto out; /* Got expected ZLP */
	} else if (dev->ep0state != GR_EP0_SETUP) {
		dev_info(dev->dev,
			 "Unexpected ep0out request at state %s - stalling\n",
			 gr_ep0state_string(dev->ep0state));
		gr_control_stall(dev);
		gr_set_ep0state(dev, GR_EP0_SETUP);
		goto out;
	} else if (!req->req.actual) {
		dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
			gr_ep0state_string(dev->ep0state));
		goto out;
	}

	/* Handle SETUP packet */
	for (i = 0; i < req->req.actual; i++)
		u.raw[i] = ((u8 *)req->req.buf)[i];

	type = u.ctrl.bRequestType;
	request = u.ctrl.bRequest;
	value = le16_to_cpu(u.ctrl.wValue);
	index = le16_to_cpu(u.ctrl.wIndex);
	length = le16_to_cpu(u.ctrl.wLength);

	gr_dbgprint_devreq(dev, type, request, value, index, length);

	/* Check for data stage */
	if (length) {
		if (type & USB_DIR_IN)
			gr_set_ep0state(dev, GR_EP0_IDATA);
		else
			gr_set_ep0state(dev, GR_EP0_ODATA);
	}

	status = 1; /* Positive status flags delegation */
	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (type & USB_RECIP_MASK) {
		case USB_RECIP_DEVICE:
			status = gr_device_request(dev, type, request,
						   value, index);
			break;
		case USB_RECIP_ENDPOINT:
			status = gr_endpoint_request(dev, type, request,
						     value, index);
			break;
		case USB_RECIP_INTERFACE:
			status = gr_interface_request(dev, type, request,
						      value, index);
			break;
		}
	}

	if (status > 0) {
		spin_unlock(&dev->lock);

		dev_vdbg(dev->dev, "DELEGATE\n");
		status = dev->driver->setup(&dev->gadget, &u.ctrl);

		spin_lock(&dev->lock);
	}

	/* Generate STALL on both ep0out and ep0in if requested */
	if (unlikely(status < 0)) {
		dev_vdbg(dev->dev, "STALL\n");
		gr_control_stall(dev);
	}

	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
	    request == USB_REQ_SET_CONFIGURATION) {
		if (!value) {
			dev_dbg(dev->dev, "STATUS: deconfigured\n");
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		} else if (status >= 0) {
			/* Not configured unless gadget OK:s it */
			dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
			usb_gadget_set_state(&dev->gadget,
					     USB_STATE_CONFIGURED);
		}
	}

	/* Get ready for next stage */
	if (dev->ep0state == GR_EP0_ODATA)
		gr_set_ep0state(dev, GR_EP0_OSTATUS);
	else if (dev->ep0state == GR_EP0_IDATA)
		gr_set_ep0state(dev, GR_EP0_ISTATUS);
	else
		gr_set_ep0state(dev, GR_EP0_SETUP);

out:
	gr_ep0out_requeue(dev);
}
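
/*
 * The ep0 state transitions driven by gr_ep0_setup() above, summarized from
 * the code:
 *
 *   SETUP --(wLength != 0, IN)---> IDATA --> ISTATUS --> SETUP
 *   SETUP --(wLength != 0, OUT)--> ODATA --> OSTATUS --> SETUP
 *   SETUP --(wLength == 0)-------> SETUP (status stage via ep0in response)
 *   STALL ------------------------> SETUP (on the next setup packet)
 *
 * OSTATUS --> SETUP is handled in gr_handle_out_ep() and ISTATUS --> SETUP
 * at the top of this function when the expected ZLP arrives.
 */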

/* ---------------------------------------------------------------------- */
/* VBUS and USB reset handling */

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_connected(struct gr_udc *dev, u32 status)
{
	u32 control;

	dev->gadget.speed = GR_SPEED(status);
	usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);

	/* Turn on full interrupts and pullup */
	control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
		   GR_CONTROL_SP | GR_CONTROL_EP);
	gr_write32(&dev->regs->control, control);
}

/* Must be called with dev->lock held */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
	u32 status;

	dev->irq_enabled = 1;
	wmb(); /* Make sure we do not ignore an interrupt */
	gr_write32(&dev->regs->control, GR_CONTROL_VI);

	/* Take care of the case we are already plugged in at this point */
	status = gr_read32(&dev->regs->status);
	if (status & GR_STATUS_VB)
		gr_vbus_connected(dev, status);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
	gr_stop_activity(dev);

	/* Report disconnect */
	if (dev->driver && dev->driver->disconnect) {
		spin_unlock(&dev->lock);

		dev->driver->disconnect(&dev->gadget);

		spin_lock(&dev->lock);
	}

	gr_enable_vbus_detect(dev);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
	gr_set_address(dev, 0);
	gr_set_ep0state(dev, GR_EP0_SETUP);
	usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
	dev->gadget.speed = GR_SPEED(status);

	gr_ep_nuke(&dev->epo[0]);
	gr_ep_nuke(&dev->epi[0]);
	dev->epo[0].stopped = 0;
	dev->epi[0].stopped = 0;
	gr_ep0out_requeue(dev);
}

/* ---------------------------------------------------------------------- */
/* Irq handling */

/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->last_desc)
		return 0;

	if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
		return 0; /* Not put in hardware buffers yet */

	if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
		return 0; /* Not transmitted yet, still in hardware buffers */

	/* Write complete */
	gr_dma_advance(ep, 0);

	return 1;
}

/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
	u32 ep_dmactrl;
	u32 ctrl;
	u16 len;
	struct gr_request *req;
	struct gr_udc *dev = ep->dev;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->curr_desc)
		return 0;

	ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
	if (ctrl & GR_DESC_OUT_CTRL_EN)
		return 0; /* Not received yet */

	/* Read complete */
	len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
	req->req.actual += len;
	if (ctrl & GR_DESC_OUT_CTRL_SE)
		req->setup = 1;

	if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
		/* Short packet or >= expected size - we are done */

		if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
			/*
			 * Send a status stage ZLP to ack the DATA stage in the
			 * OUT direction. This needs to be done before
			 * gr_dma_advance as that can lead to a call to
			 * gr_ep0_setup that can change dev->ep0state.
			 */
			gr_ep0_respond_empty(dev);
			gr_set_ep0state(dev, GR_EP0_SETUP);
		}

		gr_dma_advance(ep, 0);
	} else {
		/* Not done yet. Enable the next descriptor to receive more. */
		req->curr_desc = req->curr_desc->next_desc;
		req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

		ep_dmactrl = gr_read32(&ep->regs->dmactrl);
		gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
	}

	return 1;
}

/*
 * Handle state changes. Returns whether something was handled.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
	u32 status = gr_read32(&dev->regs->status);
	int handled = 0;
	int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
			 dev->gadget.state == USB_STATE_ATTACHED);

	/* VBUS valid detected */
	if (!powstate && (status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
		gr_vbus_connected(dev, status);
		handled = 1;
	}

	/* Disconnect */
	if (powstate && !(status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
		gr_vbus_disconnected(dev);
		handled = 1;
	}

	/* USB reset detected */
	if (status & GR_STATUS_UR) {
		dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
			GR_SPEED_STR(status));
		gr_write32(&dev->regs->status, GR_STATUS_UR);
		gr_udc_usbreset(dev, status);
		handled = 1;
	}

	/* Speed change */
	if (dev->gadget.speed != GR_SPEED(status)) {
		dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
			GR_SPEED_STR(status));
		dev->gadget.speed = GR_SPEED(status);
		handled = 1;
	}

	/* Going into suspend */
	if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB suspend\n");
		gr_set_ep0state(dev, GR_EP0_SUSPEND);
		dev->suspended_from = dev->gadget.state;
		usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->suspend) {
			spin_unlock(&dev->lock);

			dev->driver->suspend(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	/* Coming out of suspend */
	if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB resume\n");
		if (dev->suspended_from == USB_STATE_POWERED)
			gr_set_ep0state(dev, GR_EP0_DISCONNECT);
		else
			gr_set_ep0state(dev, GR_EP0_SETUP);
		usb_gadget_set_state(&dev->gadget, dev->suspended_from);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->resume) {
			spin_unlock(&dev->lock);

			dev->driver->resume(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	return handled;
}

/* Non-interrupt context irq handler */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;
	struct gr_ep *ep;
	int handled = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	if (!dev->irq_enabled)
		goto out;

	/*
	 * Check IN ep interrupts. We check these before the OUT eps because
	 * some gadgets reuse a request that might still be outstanding and
	 * that needs to be completed first (mainly setup requests).
	 */
	for (i = 0; i < dev->nepi; i++) {
		ep = &dev->epi[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_in_ep(ep) || handled;
	}

	/* Check OUT ep interrupts */
	for (i = 0; i < dev->nepo; i++) {
		ep = &dev->epo[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_out_ep(ep) || handled;
	}

	/* Check status interrupts */
	handled = gr_handle_state_changes(dev) || handled;

	/*
	 * Check AMBA DMA errors. Only check if we didn't find anything else to
	 * handle because this shouldn't happen if we did everything right.
	 */
	if (!handled) {
		list_for_each_entry(ep, &dev->ep_list, ep_list) {
			if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
				dev_err(dev->dev,
					"AMBA Error occurred for %s\n",
					ep->ep.name);
				handled = 1;
			}
		}
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

/* Interrupt context irq handler */
static irqreturn_t gr_irq(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;

	if (!dev->irq_enabled)
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}
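
/*
 * gr_irq() and gr_irq_handler() form a threaded interrupt pair, presumably
 * registered by the probe code (not part of this excerpt) along the lines of:
 *
 *	devm_request_threaded_irq(dev->dev, dev->irq, gr_irq, gr_irq_handler,
 *				  IRQF_SHARED, driver_name, dev);
 *
 * gr_irq() runs in hard interrupt context and only wakes the thread; all
 * register and queue handling happens in gr_irq_handler() under dev->lock.
 */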

/* ---------------------------------------------------------------------- */
/* USB ep ops */

/* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_enable(struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc)
{
	struct gr_udc *dev;
	struct gr_ep *ep;
	u8 mode;
	u8 nt;
	u16 max;
	u16 buffer_size = 0;
	u32 epctrl;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* Make sure we are clear for enabling */
	epctrl = gr_read32(&ep->regs->epctrl);
	if (epctrl & GR_EPCTRL_EV)
		return -EBUSY;

	/* Check that directions match */
	if (!ep->is_in != !usb_endpoint_dir_in(desc))
		return -EINVAL;

	/* Check ep num */
	if ((!ep->is_in && ep->num >= dev->nepo) ||
	    (ep->is_in && ep->num >= dev->nepi))
		return -EINVAL;

	if (usb_endpoint_xfer_control(desc)) {
		mode = 0;
	} else if (usb_endpoint_xfer_isoc(desc)) {
		mode = 1;
	} else if (usb_endpoint_xfer_bulk(desc)) {
		mode = 2;
	} else if (usb_endpoint_xfer_int(desc)) {
		mode = 3;
	} else {
		dev_err(dev->dev, "Unknown transfer type for %s\n",
			ep->ep.name);
		return -EINVAL;
	}

	/*
	 * Bits 10-0 set the max payload. 12-11 set the number of
	 * additional transactions.
	 */
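	/*
	 * For example: a high-bandwidth isochronous endpoint with
	 * wMaxPacketSize 0x1400 decodes to max = 0x400 (1024 bytes) and
	 * nt = 2 (two additional transactions per microframe), so the checks
	 * below require a hardware buffer of at least 3 * 1024 bytes.
	 */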
  1281. max = 0x7ff & usb_endpoint_maxp(desc);
  1282. nt = usb_endpoint_maxp_mult(desc) - 1;
  1283. buffer_size = GR_BUFFER_SIZE(epctrl);
  1284. if (nt && (mode == 0 || mode == 2)) {
  1285. dev_err(dev->dev,
  1286. "%s mode: multiple trans./microframe not valid\n",
  1287. (mode == 2 ? "Bulk" : "Control"));
  1288. return -EINVAL;
  1289. } else if (nt == 0x3) {
  1290. dev_err(dev->dev,
  1291. "Invalid value 0x3 for additional trans./microframe\n");
  1292. return -EINVAL;
  1293. } else if ((nt + 1) * max > buffer_size) {
  1294. dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
  1295. buffer_size, (nt + 1), max);
  1296. return -EINVAL;
  1297. } else if (max == 0) {
  1298. dev_err(dev->dev, "Max payload cannot be set to 0\n");
  1299. return -EINVAL;
  1300. } else if (max > ep->ep.maxpacket_limit) {
  1301. dev_err(dev->dev, "Requested max payload %d > limit %d\n",
  1302. max, ep->ep.maxpacket_limit);
  1303. return -EINVAL;
  1304. }
	spin_lock(&ep->dev->lock);

	if (!ep->stopped) {
		spin_unlock(&ep->dev->lock);
		return -EBUSY;
	}

	ep->stopped = 0;
	ep->wedged = 0;
	ep->ep.desc = desc;
	ep->ep.maxpacket = max;
	ep->dma_start = 0;

	if (nt) {
		/*
		 * Maximum possible size of all payloads in one microframe
		 * regardless of direction when using high-bandwidth mode.
		 */
		ep->bytes_per_buffer = (nt + 1) * max;
	} else if (ep->is_in) {
		/*
		 * The biggest multiple of maximum packet size that fits into
		 * the buffer. The hardware will split up into many packets in
		 * the IN direction.
		 */
		ep->bytes_per_buffer = (buffer_size / max) * max;
	} else {
		/*
		 * Only single packets will be placed in the buffers in the
		 * OUT direction.
		 */
		ep->bytes_per_buffer = max;
	}
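
	/*
	 * Worked example (illustrative): with a 1024-byte hardware buffer,
	 * a bulk IN endpoint with max = 512 gets bytes_per_buffer = 1024
	 * (two packets per buffer), while the corresponding OUT endpoint
	 * gets bytes_per_buffer = 512 (one packet per buffer).
	 */
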
	epctrl = (max << GR_EPCTRL_MAXPL_POS)
		| (nt << GR_EPCTRL_NT_POS)
		| (mode << GR_EPCTRL_TT_POS)
		| GR_EPCTRL_EV;
	if (ep->is_in)
		epctrl |= GR_EPCTRL_PI;
	gr_write32(&ep->regs->epctrl, epctrl);

	gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);

	spin_unlock(&ep->dev->lock);

	dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
		ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
	return 0;
}
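
/*
 * For reference (a sketch, not part of this driver): a gadget function
 * driver normally reaches gr_ep_enable() through the usb_ep_enable()
 * wrapper, roughly:
 *
 *	ep->desc = desc;	 // typically set via config_ep_by_speed()
 *	ret = usb_ep_enable(ep); // calls ep->ops->enable(ep, ep->desc)
 *	if (ret)
 *		goto fail;	 // hypothetical error path
 */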
/* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_disable(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !ep->ep.desc)
		return -ENODEV;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);

	spin_lock_irqsave(&dev->lock, flags);

	gr_ep_nuke(ep);
	gr_ep_reset(ep);
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/*
 * Frees a request, but not any DMA buffers associated with it
 * (gr_finish_request should already have taken care of that).
 */
static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;

	if (!_ep || !_req)
		return;
	req = container_of(_req, struct gr_request, req);

	/* Leads to memory leak */
	WARN(!list_empty(&req->queue),
	     "request not dequeued properly before freeing\n");

	kfree(req);
}
/* Queue a request from the gadget */
static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct gr_udc *dev;
	int ret;

	if (unlikely(!_ep || !_req))
		return -EINVAL;

	ep = container_of(_ep, struct gr_ep, ep);
	req = container_of(_req, struct gr_request, req);
	dev = ep->dev;

	spin_lock(&ep->dev->lock);

	/*
	 * The ep0 pointer in the gadget struct is used both for ep0in and
	 * ep0out. In a data stage in the out direction ep0out needs to be used
	 * instead of the default ep0in. Completion functions might use
	 * driver_data, so that needs to be copied as well.
	 */
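	/*
	 * Illustrative case: a CDC ACM function queues the 7-byte
	 * SET_LINE_CODING payload on gadget->ep0 (epi[0]); since that
	 * request has an OUT data stage, the transfer must actually run
	 * on ep0out.
	 */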
	if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
		ep = &dev->epo[0];
		ep->ep.driver_data = dev->epi[0].ep.driver_data;
	}

	if (ep->is_in)
		gr_dbgprint_request("EXTERN", ep, req);

	ret = gr_queue(ep, req, GFP_ATOMIC);

	spin_unlock(&ep->dev->lock);

	return ret;
}
/* Dequeue JUST ONE request */
static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;
	struct gr_ep *ep;
	struct gr_udc *dev;
	int ret = 0;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* We can't touch (DMA) registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	spin_lock_irqsave(&dev->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
		/* This request is currently being processed */
		gr_abort_dma(ep);
		if (ep->stopped)
			gr_finish_request(ep, req, -ECONNRESET);
		else
			gr_dma_advance(ep, -ECONNRESET);
	} else if (!list_empty(&req->queue)) {
		/* Not being processed - gr_finish_request dequeues it */
		gr_finish_request(ep, req, -ECONNRESET);
	} else {
		ret = -EOPNOTSUPP;
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}
/* Helper for gr_set_halt and gr_set_wedge */
static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	int ret;
	struct gr_ep *ep;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	spin_lock(&ep->dev->lock);

	/* Halting an IN endpoint should fail if queue is not empty */
	if (halt && ep->is_in && !list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = gr_ep_halt_wedge(ep, halt, wedge, 0);

out:
	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Halt endpoint */
static int gr_set_halt(struct usb_ep *_ep, int halt)
{
	return gr_set_halt_wedge(_ep, halt, 0);
}

/* Halt and wedge endpoint */
static int gr_set_wedge(struct usb_ep *_ep)
{
	return gr_set_halt_wedge(_ep, 1, 1);
}
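
/*
 * For reference: a wedged endpoint stays halted even if the host issues
 * CLEAR_FEATURE(ENDPOINT_HALT); only the gadget side can clear the halt,
 * as used e.g. by mass-storage reset recovery.
 */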
/*
 * Return the total number of bytes currently stored in the internal buffers of
 * the endpoint.
 */
static int gr_fifo_status(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epstat;
	u32 bytes = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	epstat = gr_read32(&ep->regs->epstat);

	if (epstat & GR_EPSTAT_B0)
		bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
	if (epstat & GR_EPSTAT_B1)
		bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;

	return bytes;
}
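
/*
 * For reference: each endpoint appears to be double buffered in hardware,
 * so the status above sums the byte counts of buffer 0 and buffer 1,
 * counting a buffer only while its valid bit (B0/B1) is set.
 */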
/* Empty data from internal buffers of an endpoint. */
static void gr_fifo_flush(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epctrl;

	if (!_ep)
		return;
	ep = container_of(_ep, struct gr_ep, ep);
	dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);

	spin_lock(&ep->dev->lock);

	epctrl = gr_read32(&ep->regs->epctrl);
	epctrl |= GR_EPCTRL_CB;
	gr_write32(&ep->regs->epctrl, epctrl);

	spin_unlock(&ep->dev->lock);
}
static struct usb_ep_ops gr_ep_ops = {
	.enable		= gr_ep_enable,
	.disable	= gr_ep_disable,

	.alloc_request	= gr_alloc_request,
	.free_request	= gr_free_request,

	.queue		= gr_queue_ext,
	.dequeue	= gr_dequeue,

	.set_halt	= gr_set_halt,
	.set_wedge	= gr_set_wedge,
	.fifo_status	= gr_fifo_status,
	.fifo_flush	= gr_fifo_flush,
};
/* ---------------------------------------------------------------------- */
/* USB Gadget ops */

static int gr_get_frame(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);
	return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
}
static int gr_wakeup(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	/* Remote wakeup feature not enabled by host */
	if (!dev->remote_wakeup)
		return -EINVAL;

	spin_lock(&dev->lock);

	gr_write32(&dev->regs->control,
		   gr_read32(&dev->regs->control) | GR_CONTROL_RW);

	spin_unlock(&dev->lock);

	return 0;
}
static int gr_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct gr_udc *dev;
	u32 control;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	if (is_on)
		control |= GR_CONTROL_EP;
	else
		control &= ~GR_CONTROL_EP;
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);

	return 0;
}
static int gr_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);

	spin_lock(&dev->lock);

	/* Hook up the driver */
	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Get ready for host detection */
	gr_enable_vbus_detect(dev);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_stop(struct usb_gadget *gadget)
{
	struct gr_udc *dev = to_gr_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->driver = NULL;
	gr_stop_activity(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
static const struct usb_gadget_ops gr_ops = {
	.get_frame	= gr_get_frame,
	.wakeup		= gr_wakeup,
	.pullup		= gr_pullup,
	.udc_start	= gr_udc_start,
	.udc_stop	= gr_udc_stop,
	/* Other operations not supported */
};
/* ---------------------------------------------------------------------- */
/* Module probe, removal and of-matching */

static const char * const onames[] = {
	"ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
	"ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
	"ep12out", "ep13out", "ep14out", "ep15out"
};

static const char * const inames[] = {
	"ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
	"ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
	"ep12in", "ep13in", "ep14in", "ep15in"
};
/* Must be called with dev->lock held */
static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct usb_request *_req;
	void *buf;

	if (is_in) {
		ep = &dev->epi[num];
		ep->ep.name = inames[num];
		ep->regs = &dev->regs->epi[num];
	} else {
		ep = &dev->epo[num];
		ep->ep.name = onames[num];
		ep->regs = &dev->regs->epo[num];
	}

	gr_ep_reset(ep);
	ep->num = num;
	ep->is_in = is_in;
	ep->dev = dev;
	ep->ep.ops = &gr_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	if (num == 0) {
		_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
		buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
		if (!_req || !buf) {
			/* possible _req freed by gr_probe via gr_remove */
			return -ENOMEM;
		}

		req = container_of(_req, struct gr_request, req);
		req->req.buf = buf;
		req->req.length = MAX_CTRL_PL_SIZE;

		if (is_in)
			dev->ep0reqi = req; /* Complete gets set as used */
		else
			dev->ep0reqo = req; /* Completion treated separately */

		usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
		ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;

		ep->ep.caps.type_control = true;
	} else {
		usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);

		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;
	}
	list_add_tail(&ep->ep_list, &dev->ep_list);

	if (is_in)
		ep->ep.caps.dir_in = true;
	else
		ep->ep.caps.dir_out = true;

	ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
					 &ep->tailbuf_paddr, GFP_ATOMIC);
	if (!ep->tailbuf)
		return -ENOMEM;

	return 0;
}
/* Must be called with dev->lock held */
static int gr_udc_init(struct gr_udc *dev)
{
	struct device_node *np = dev->dev->of_node;
	u32 epctrl_val;
	u32 dmactrl_val;
	int i;
	int ret = 0;
	u32 bufsize;

	gr_set_address(dev, 0);

	INIT_LIST_HEAD(&dev->gadget.ep_list);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->gadget.ep0 = &dev->epi[0].ep;

	INIT_LIST_HEAD(&dev->ep_list);
	gr_set_ep0state(dev, GR_EP0_DISCONNECT);

	for (i = 0; i < dev->nepo; i++) {
		if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 0, bufsize);
		if (ret)
			return ret;
	}

	for (i = 0; i < dev->nepi; i++) {
		if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 1, bufsize);
		if (ret)
			return ret;
	}
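
	/*
	 * For reference: per-endpoint buffer sizes can be supplied in the
	 * device tree, e.g. (hypothetical node fragment)
	 *
	 *	epobufsizes = <1024 1024 512>;
	 *	epibufsizes = <1024 512>;
	 *
	 * Any endpoint without an entry falls back to 1024 bytes above.
	 */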
	/* Must be disabled by default */
	dev->remote_wakeup = 0;

	/* Enable ep0out and ep0in */
	epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
	dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
	gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
	gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
	gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);

	return 0;
}
static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
{
	struct gr_ep *ep;

	if (is_in)
		ep = &dev->epi[num];
	else
		ep = &dev->epo[num];

	if (ep->tailbuf)
		dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
				  ep->tailbuf, ep->tailbuf_paddr);
}
static int gr_remove(struct platform_device *pdev)
{
	struct gr_udc *dev = platform_get_drvdata(pdev);
	int i;

	if (dev->added)
		usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
	if (dev->driver)
		return -EBUSY;

	gr_dfs_delete(dev);
	dma_pool_destroy(dev->desc_pool);
	platform_set_drvdata(pdev, NULL);

	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
	gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);

	for (i = 0; i < dev->nepo; i++)
		gr_ep_remove(dev, i, 0);
	for (i = 0; i < dev->nepi; i++)
		gr_ep_remove(dev, i, 1);

	return 0;
}
static int gr_request_irq(struct gr_udc *dev, int irq)
{
	return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
					 IRQF_SHARED, driver_name, dev);
}
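
/*
 * For reference: gr_irq is the hard (top-half) handler; when it returns
 * IRQ_WAKE_THREAD (see above), the kernel runs gr_irq_handler in a
 * dedicated IRQ thread, where sleeping operations are allowed.
 */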
static int gr_probe(struct platform_device *pdev)
{
	struct gr_udc *dev;
	struct resource *res;
	struct gr_regs __iomem *regs;
	int retval;
	u32 status;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq <= 0) {
		dev_err(dev->dev, "No irq found\n");
		return -ENODEV;
	}

	/* Some core configurations have separate irqs for IN and OUT events */
	dev->irqi = platform_get_irq(pdev, 1);
	if (dev->irqi > 0) {
		dev->irqo = platform_get_irq(pdev, 2);
		if (dev->irqo <= 0) {
			dev_err(dev->dev, "Found irqi but not irqo\n");
			return -ENODEV;
		}
	} else {
		dev->irqi = 0;
	}
	dev->gadget.name = driver_name;
	dev->gadget.max_speed = USB_SPEED_HIGH;
	dev->gadget.ops = &gr_ops;

	spin_lock_init(&dev->lock);
	dev->regs = regs;

	platform_set_drvdata(pdev, dev);

	/* Determine number of endpoints and data interface mode */
	status = gr_read32(&dev->regs->status);
	dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
	dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;

	if (!(status & GR_STATUS_DM)) {
		dev_err(dev->dev, "Slave mode cores are not supported\n");
		return -ENODEV;
	}

	/* --- Effects of the following calls might need explicit cleanup --- */

	/* Create DMA pool for descriptors */
	dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
					 sizeof(struct gr_dma_desc), 4, 0);
	if (!dev->desc_pool) {
		dev_err(dev->dev, "Could not allocate DMA pool");
		return -ENOMEM;
	}

	spin_lock(&dev->lock);

	/* Inside lock so that no gadget can use this udc until probe is done */
	retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
	if (retval) {
		dev_err(dev->dev, "Could not add gadget udc");
		goto out;
	}
	dev->added = 1;

	retval = gr_udc_init(dev);
	if (retval)
		goto out;

	gr_dfs_create(dev);

	/* Clear all interrupt enables that might be left on since last boot */
	gr_disable_interrupts_and_pullup(dev);

	retval = gr_request_irq(dev, dev->irq);
	if (retval) {
		dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
		goto out;
	}

	if (dev->irqi) {
		retval = gr_request_irq(dev, dev->irqi);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqi %d\n",
				dev->irqi);
			goto out;
		}
		retval = gr_request_irq(dev, dev->irqo);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqo %d\n",
				dev->irqo);
			goto out;
		}
	}

	if (dev->irqi)
		dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
			 dev->irq, dev->irqi, dev->irqo);
	else
		dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);

out:
	spin_unlock(&dev->lock);

	if (retval)
		gr_remove(pdev);

	return retval;
}
static const struct of_device_id gr_match[] = {
	{.name = "GAISLER_USBDC"},
	{.name = "01_021"},
	{},
};
MODULE_DEVICE_TABLE(of, gr_match);

static struct platform_driver gr_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = gr_match,
	},
	.probe = gr_probe,
	.remove = gr_remove,
};
module_platform_driver(gr_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");