// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence USBSS DRD Driver - gadget side.
 *
 * Copyright (C) 2018-2019 Cadence Design Systems.
 * Copyright (C) 2017-2018 NXP
 *
 * Authors: Pawel Jez <pjez@cadence.com>,
 *          Pawel Laszczak <pawell@cadence.com>
 *          Peter Chen <peter.chen@nxp.com>
 */

/*
 * Work around 1:
 * In some situations the controller may get a stale data address in a TRB
 * due to the following sequence:
 * 1. Controller reads a TRB that includes a data address
 * 2. Software updates TRBs, including the data address and Cycle bit
 * 3. Controller reads the TRB again, which includes the Cycle bit
 * 4. DMA runs with the stale data address
 *
 * To fix this problem, the driver needs to make the first TRB in a TD
 * invalid. After preparing all TRBs the driver needs to check the position
 * of the DMA; if the DMA points to the first just-added TRB and the doorbell
 * is 1, then the driver must defer making this TRB valid. This TRB will be
 * made valid while adding the next TRB, and only if DMA is stopped or at a
 * TRBERR interrupt.
 *
 * The issue has been fixed in the DEV_VER_V3 version of the controller.
 *
 * Work around 2:
 * For OUT endpoints the controller uses shared on-chip buffers for all
 * incoming packets, including ep0out. It is a FIFO buffer, so packets must
 * be handled by DMA in the correct order. If the first packet in the buffer
 * is not handled, then the following packets directed to other endpoints and
 * functions are blocked.
 * Additionally, packets directed to one endpoint can block the entire
 * on-chip buffer. In this case transfers to other endpoints are blocked as
 * well.
 *
 * To resolve this issue, after raising the descriptor-missing interrupt the
 * driver prepares an internal usb_request object and uses it to arm a DMA
 * transfer.
 *
 * The problematic situation was observed when an endpoint had been enabled
 * but no usb_request was queued. The driver tries to detect such endpoints
 * and uses this workaround only for them.
 *
 * The driver uses a limited number of buffers. This number can be set by the
 * macro CDNS3_WA2_NUM_BUFFERS.
 *
 * Such a blocking situation was observed on the ACM gadget. For this
 * function the host sends an OUT data packet, but the ACM function is not
 * prepared for it. This causes the buffer placed in on-chip memory to block
 * transfers to other endpoints.
 *
 * The issue has been fixed in the DEV_VER_V2 version of the controller.
 */

#include <linux/dma-mapping.h>
#include <linux/usb/gadget.h>
#include <linux/module.h>
#include <linux/iopoll.h>

#include "core.h"
#include "gadget-export.h"
#include "gadget.h"
#include "trace.h"
#include "drd.h"

static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
				   struct usb_request *request,
				   gfp_t gfp_flags);

/**
 * cdns3_set_register_bit - set bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to set
 */
void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) | mask;
	writel(mask, ptr);
}

/**
 * cdns3_ep_addr_to_index - converts endpoint address to
 * index of endpoint object in cdns3_device.eps[] container
 * @ep_addr: endpoint address for which endpoint object is required
 *
 */
u8 cdns3_ep_addr_to_index(u8 ep_addr)
{
	return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0));
}
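
/*
 * Worked example (follows directly from the arithmetic above):
 * ep1out (address 0x01) maps to index 1, while ep1in (address 0x81) maps to
 * index 1 + 16 = 17; ep0out/ep0in map to indices 0 and 16 respectively.
 */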

static int cdns3_get_dma_pos(struct cdns3_device *priv_dev,
			     struct cdns3_endpoint *priv_ep)
{
	int dma_index;

	dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma;

	return dma_index / TRB_SIZE;
}
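
/*
 * For instance, if ep_traddr currently holds trb_pool_dma + 3 * TRB_SIZE,
 * cdns3_get_dma_pos() returns 3: DMA is processing the fourth TRB of the
 * ring.
 */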

/**
 * cdns3_next_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
struct usb_request *cdns3_next_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct usb_request, list);
}

/**
 * cdns3_next_align_buf - returns next buffer from list
 * @list: list containing buffers
 *
 * Returns buffer or NULL if no buffers in list
 */
struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
}

/**
 * cdns3_next_priv_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_request, list);
}

/**
 * cdns3_select_ep - selects endpoint
 * @priv_dev: extended gadget object
 * @ep: endpoint address
 */
void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
{
	if (priv_dev->selected_ep == ep)
		return;

	priv_dev->selected_ep = ep;
	writel(ep, &priv_dev->regs->ep_sel);
}

dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
				 struct cdns3_trb *trb)
{
	u32 offset = (char *)trb - (char *)priv_ep->trb_pool;

	return priv_ep->trb_pool_dma + offset;
}

int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
{
	switch (priv_ep->type) {
	case USB_ENDPOINT_XFER_ISOC:
		return TRB_ISO_RING_SIZE;
	case USB_ENDPOINT_XFER_CONTROL:
		return TRB_CTRL_RING_SIZE;
	default:
		return TRB_RING_SIZE;
	}
}

/**
 * cdns3_allocate_trb_pool - Allocates TRB pool for selected endpoint
 * @priv_ep: endpoint object
 *
 * Function will return 0 on success or -ENOMEM on allocation error
 */
int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int ring_size = cdns3_ring_size(priv_ep);
	struct cdns3_trb *link_trb;

	if (!priv_ep->trb_pool) {
		priv_ep->trb_pool = dma_alloc_coherent(priv_dev->sysdev,
						       ring_size,
						       &priv_ep->trb_pool_dma,
						       GFP_DMA32 | GFP_ATOMIC);
		if (!priv_ep->trb_pool)
			return -ENOMEM;
	} else {
		memset(priv_ep->trb_pool, 0, ring_size);
	}

	if (!priv_ep->num)
		return 0;

	priv_ep->num_trbs = ring_size / TRB_SIZE;
	/* Initialize the last TRB as Link TRB. */
	link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));
	link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma);
	link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE;

	return 0;
}

static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (priv_ep->trb_pool) {
		dma_free_coherent(priv_dev->sysdev,
				  cdns3_ring_size(priv_ep),
				  priv_ep->trb_pool, priv_ep->trb_pool_dma);
		priv_ep->trb_pool = NULL;
	}
}

/**
 * cdns3_ep_stall_flush - Stalls and flushes selected endpoint
 * @priv_ep: endpoint object
 *
 * Endpoint must be selected before call to this function
 */
static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int val;

	trace_cdns3_halt(priv_ep, 1, 1);

	writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL,
	       &priv_dev->regs->ep_cmd);

	/* wait for DFLUSH cleared */
	readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
				  !(val & EP_CMD_DFLUSH), 1, 1000);
	priv_ep->flags |= EP_STALLED;
	priv_ep->flags &= ~EP_STALL_PENDING;
}

/**
 * cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller.
 * @priv_dev: extended gadget object
 */
void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
{
	writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);

	cdns3_allow_enable_l1(priv_dev, 0);
	priv_dev->hw_configured_flag = 0;
	priv_dev->onchip_used_size = 0;
	priv_dev->out_mem_is_allocated = 0;
	priv_dev->wait_for_setup = 0;
}

/**
 * cdns3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 * @cs: Cycle state
 * @trb_in_seg: number of TRBs in segment
 *
 * The index should never point to the link TRB. After incrementing,
 * if it points to the link TRB, wrap around to the beginning and toggle
 * the cycle state bit. The link TRB is always the last TRB entry.
 */
static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
{
	(*index)++;
	if (*index == (trb_in_seg - 1)) {
		*index = 0;
		*cs ^= 1;
	}
}
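
/*
 * Example: with an 8-entry ring (entry 7 being the link TRB), an index of 6
 * increments to 7, the link TRB slot, so it wraps to 0 and the cycle state
 * toggles; software and hardware thereby stay in agreement about TRB
 * ownership across the wrap.
 */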

/**
 * cdns3_ep_inc_enq - increment endpoint's enqueue pointer
 * @priv_ep: The endpoint whose enqueue pointer we're incrementing
 */
static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs--;
	cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs);
}

/**
 * cdns3_ep_inc_deq - increment endpoint's dequeue pointer
 * @priv_ep: The endpoint whose dequeue pointer we're incrementing
 */
static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs++;
	cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
}

void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
{
	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
	int current_trb = priv_req->start_trb;

	while (current_trb != priv_req->end_trb) {
		cdns3_ep_inc_deq(priv_ep);
		current_trb = priv_ep->dequeue;
	}

	cdns3_ep_inc_deq(priv_ep);
}

/**
 * cdns3_allow_enable_l1 - enable/disable permits to transition to L1.
 * @priv_dev: Extended gadget object
 * @enable: Enable/disable permit to transition to L1.
 *
 * If bit USB_CONF_L1EN is set and the device receives an Extended Token
 * packet, then the controller answers with an ACK handshake.
 * If bit USB_CONF_L1DS is set and the device receives an Extended Token
 * packet, then the controller answers with a NYET handshake.
 */
void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable)
{
	if (enable)
		writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf);
	else
		writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf);
}

enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev)
{
	u32 reg;

	reg = readl(&priv_dev->regs->usb_sts);

	if (DEV_SUPERSPEED(reg))
		return USB_SPEED_SUPER;
	else if (DEV_HIGHSPEED(reg))
		return USB_SPEED_HIGH;
	else if (DEV_FULLSPEED(reg))
		return USB_SPEED_FULL;
	else if (DEV_LOWSPEED(reg))
		return USB_SPEED_LOW;
	return USB_SPEED_UNKNOWN;
}

/**
 * cdns3_start_all_request - add to ring all requests not started yet
 * @priv_dev: Extended gadget object
 * @priv_ep: The endpoint for which requests will be started.
 *
 * Returns -ENOBUFS if the transfer ring does not have enough TRBs to start
 * all requests.
 */
static int cdns3_start_all_request(struct cdns3_device *priv_dev,
				   struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	int ret = 0;

	while (!list_empty(&priv_ep->deferred_req_list)) {
		request = cdns3_next_request(&priv_ep->deferred_req_list);
		priv_req = to_cdns3_request(request);

		ret = cdns3_ep_run_transfer(priv_ep, request);
		if (ret)
			return ret;

		list_del(&request->list);
		list_add_tail(&request->list,
			      &priv_ep->pending_req_list);
	}

	priv_ep->flags &= ~EP_RING_FULL;
	return ret;
}

/*
 * WA2: Set flag for all non-ISOC OUT endpoints. If this flag is set, the
 * driver tries to detect whether the endpoint needs an additional internal
 * buffer for unblocking the on-chip FIFO buffer. This flag will be cleared
 * if the DMA is armed before the first DESCMISS interrupt.
 */
#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
	if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
		(reg) |= EP_STS_EN_DESCMISEN; \
	} } while (0)

/**
 * cdns3_wa2_descmiss_copy_data - copy data from internal requests to
 * the request queued by the class driver.
 * @priv_ep: extended endpoint object
 * @request: request object
 */
static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
					 struct usb_request *request)
{
	struct usb_request *descmiss_req;
	struct cdns3_request *descmiss_priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		int chunk_end;
		int length;

		descmiss_priv_req =
			cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		descmiss_req = &descmiss_priv_req->request;

		/* driver can't touch pending request */
		if (descmiss_priv_req->flags & REQUEST_PENDING)
			break;

		chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
		length = request->actual + descmiss_req->actual;

		request->status = descmiss_req->status;

		if (length <= request->length) {
			memcpy(&((u8 *)request->buf)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			/* It should never occur */
			request->status = -ENOMEM;
		}

		list_del_init(&descmiss_priv_req->list);

		kfree(descmiss_req->buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req);
		--priv_ep->wa2_counter;

		if (!chunk_end)
			break;
	}
}
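
/*
 * Note: internal DESCMISS requests that belong to one logical transfer are
 * chained via REQUEST_INTERNAL_CH (see cdns3_wa2_descmissing_packet() below),
 * so the copy loop above keeps consuming buffers until it reaches a request
 * without the chain flag.
 */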

struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
					      struct cdns3_endpoint *priv_ep,
					      struct cdns3_request *priv_req)
{
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
	    priv_req->flags & REQUEST_INTERNAL) {
		struct usb_request *req;

		req = cdns3_next_request(&priv_ep->deferred_req_list);

		priv_ep->descmis_req = NULL;

		if (!req)
			return NULL;

		cdns3_wa2_descmiss_copy_data(priv_ep, req);
		if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
		    req->length != req->actual) {
			/* wait for next part of transfer */
			return NULL;
		}

		if (req->status == -EINPROGRESS)
			req->status = 0;

		list_del_init(&req->list);
		cdns3_start_all_request(priv_dev, priv_ep);
		return req;
	}

	return &priv_req->request;
}

int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
			      struct cdns3_endpoint *priv_ep,
			      struct cdns3_request *priv_req)
{
	int deferred = 0;

	/*
	 * If the transfer was queued before DESCMISS appeared, then the
	 * driver can disable handling of the DESCMISS interrupt: it assumes
	 * the special treatment for this endpoint is no longer needed.
	 */
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		u32 reg;

		cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		reg = readl(&priv_dev->regs->ep_sts_en);
		reg &= ~EP_STS_EN_DESCMISEN;
		trace_cdns3_wa2(priv_ep, "workaround disabled\n");
		writel(reg, &priv_dev->regs->ep_sts_en);
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
		u8 pending_empty = list_empty(&priv_ep->pending_req_list);
		u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list);

		/*
		 * The DESCMISS transfer has been finished, so data will be
		 * directly copied from the internally allocated usb_request
		 * objects.
		 */
		if (pending_empty && !descmiss_empty &&
		    !(priv_req->flags & REQUEST_INTERNAL)) {
			cdns3_wa2_descmiss_copy_data(priv_ep,
						     &priv_req->request);
			trace_cdns3_wa2(priv_ep, "get internal stored data");

			list_add_tail(&priv_req->request.list,
				      &priv_ep->pending_req_list);
			cdns3_gadget_giveback(priv_ep, priv_req,
					      priv_req->request.status);

			/*
			 * The driver intentionally returns a positive value
			 * here: it informs the caller that the transfer has
			 * already been finished.
			 */
			return EINPROGRESS;
		}

		/*
		 * The driver will wait for completion of the DESCMISS
		 * transfer before starting a new, non-DESCMISS transfer.
		 */
		if (!pending_empty && !descmiss_empty) {
			trace_cdns3_wa2(priv_ep, "wait for pending transfer\n");
			deferred = 1;
		}

		if (priv_req->flags & REQUEST_INTERNAL)
			list_add_tail(&priv_req->list,
				      &priv_ep->wa2_descmiss_req_list);
	}

	return deferred;
}

static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		u8 chain;

		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		chain = !!(priv_req->flags & REQUEST_INTERNAL_CH);

		trace_cdns3_wa2(priv_ep, "removes eldest request");

		kfree(priv_req->request.buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
					     &priv_req->request);
		list_del_init(&priv_req->list);
		--priv_ep->wa2_counter;

		if (!chain)
			break;
	}
}

/**
 * cdns3_wa2_descmissing_packet - handles descriptor missing event.
 * @priv_ep: extended endpoint object
 *
 * This function is used only for WA2. For more information see Work around 2
 * description.
 */
static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN;
	}

	trace_cdns3_wa2(priv_ep, "Descriptor Missing detected\n");

	if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS)
		cdns3_wa2_remove_old_request(priv_ep);

	request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
						GFP_ATOMIC);
	if (!request)
		goto err;

	priv_req = to_cdns3_request(request);
	priv_req->flags |= REQUEST_INTERNAL;

	/*
	 * If this field is still assigned, it indicates that the transfer
	 * related to the previous request has not been finished yet. In this
	 * case the driver simply allocates the next request and sets the
	 * REQUEST_INTERNAL_CH flag on the previous one, indicating that the
	 * current request is part of the previous one.
	 */
	if (priv_ep->descmis_req)
		priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH;

	priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE,
					GFP_ATOMIC);
	priv_ep->wa2_counter++;

	if (!priv_req->request.buf) {
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
		goto err;
	}

	priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE;
	priv_ep->descmis_req = priv_req;

	__cdns3_gadget_ep_queue(&priv_ep->endpoint,
				&priv_ep->descmis_req->request,
				GFP_ATOMIC);

	return;

err:
	dev_err(priv_ep->cdns3_dev->dev,
		"Failed: not enough memory for DESCMIS\n");
}

/**
 * cdns3_gadget_giveback - call struct usb_request's ->complete callback
 * @priv_ep: The endpoint to which the request belongs
 * @priv_req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @priv_req and call its ->complete() callback to notify
 * upper layers that it has completed.
 */
void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
			   struct cdns3_request *priv_req,
			   int status)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *request = &priv_req->request;

	list_del_init(&request->list);

	if (request->status == -EINPROGRESS)
		request->status = status;

	usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
					priv_ep->dir);

	if ((priv_req->flags & REQUEST_UNALIGNED) &&
	    priv_ep->dir == USB_DIR_OUT && !request->status)
		memcpy(request->buf, priv_req->aligned_buf->buf,
		       request->length);

	priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
	trace_cdns3_gadget_giveback(priv_req);

	if (priv_dev->dev_ver < DEV_VER_V2) {
		request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep,
						    priv_req);
		if (!request)
			return;
	}

	if (request->complete) {
		spin_unlock(&priv_dev->lock);
		usb_gadget_giveback_request(&priv_ep->endpoint,
					    request);
		spin_lock(&priv_dev->lock);
	}

	if (request->buf == priv_dev->zlp_buf)
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
}

void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
{
	/* Work around for stale data address in TRB */
	if (priv_ep->wa1_set) {
		trace_cdns3_wa1(priv_ep, "restore cycle bit");

		priv_ep->wa1_set = 0;
		priv_ep->wa1_trb_index = 0xFFFF;
		if (priv_ep->wa1_cycle_bit) {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control | 0x1;
		} else {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control & ~0x1;
		}
	}
}

static void cdns3_free_aligned_request_buf(struct work_struct *work)
{
	struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
						     aligned_buf_wq);
	struct cdns3_aligned_buf *buf, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&priv_dev->lock, flags);

	list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) {
		if (!buf->in_use) {
			list_del(&buf->list);

			/*
			 * Re-enable interrupts to free DMA capable memory.
			 * Driver can't free this memory with disabled
			 * interrupts.
			 */
			spin_unlock_irqrestore(&priv_dev->lock, flags);
			dma_free_coherent(priv_dev->sysdev, buf->size,
					  buf->buf, buf->dma);
			kfree(buf);
			spin_lock_irqsave(&priv_dev->lock, flags);
		}
	}

	spin_unlock_irqrestore(&priv_dev->lock, flags);
}

static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req)
{
	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_aligned_buf *buf;

	/* check if buffer is aligned to 8. */
	if (!((uintptr_t)priv_req->request.buf & 0x7))
		return 0;

	buf = priv_req->aligned_buf;

	if (!buf || priv_req->request.length > buf->size) {
		buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
		if (!buf)
			return -ENOMEM;

		buf->size = priv_req->request.length;

		buf->buf = dma_alloc_coherent(priv_dev->sysdev,
					      buf->size,
					      &buf->dma,
					      GFP_ATOMIC);
		if (!buf->buf) {
			kfree(buf);
			return -ENOMEM;
		}

		if (priv_req->aligned_buf) {
			trace_cdns3_free_aligned_request(priv_req);
			priv_req->aligned_buf->in_use = 0;
			queue_work(system_freezable_wq,
				   &priv_dev->aligned_buf_wq);
		}

		buf->in_use = 1;
		priv_req->aligned_buf = buf;

		list_add_tail(&buf->list,
			      &priv_dev->aligned_buf_list);
	}

	if (priv_ep->dir == USB_DIR_IN) {
		memcpy(buf->buf, priv_req->request.buf,
		       priv_req->request.length);
	}

	priv_req->flags |= REQUEST_UNALIGNED;
	trace_cdns3_prepare_aligned_request(priv_req);

	return 0;
}
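
/*
 * For example, a request buffer at an address ending in 0x4 fails the 8-byte
 * alignment check above, so the driver bounces it through a DMA-coherent
 * buffer: data is copied in before the transfer for IN endpoints, and copied
 * back after completion for OUT endpoints (see cdns3_gadget_giveback()).
 */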

static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep,
				  struct cdns3_trb *trb)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (!priv_ep->wa1_set) {
		u32 doorbell;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

		if (doorbell) {
			priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0;
			priv_ep->wa1_set = 1;
			priv_ep->wa1_trb = trb;
			priv_ep->wa1_trb_index = priv_ep->enqueue;
			trace_cdns3_wa1(priv_ep, "set guard");
			return 0;
		}
	}
	return 1;
}

static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
					     struct cdns3_endpoint *priv_ep)
{
	int dma_index;
	u32 doorbell;

	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
	dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

	if (!doorbell || dma_index != priv_ep->wa1_trb_index)
		cdns3_wa1_restore_cycle_bit(priv_ep);
}

/**
 * cdns3_ep_run_transfer - start transfer on non-default endpoint hardware
 * @priv_ep: endpoint object
 * @request: request object
 *
 * Returns zero on success or negative value on failure
 */
int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
			  struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	dma_addr_t trb_dma;
	u32 togle_pcs = 1;
	int sg_iter = 0;
	int num_trb;
	int address;
	u32 control;
	int pcs;

	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
		num_trb = priv_ep->interval;
	else
		num_trb = request->num_sgs ? request->num_sgs : 1;

	if (num_trb > priv_ep->free_trbs) {
		priv_ep->flags |= EP_RING_FULL;
		return -ENOBUFS;
	}

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* must allocate buffer aligned to 8 */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;

	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	/* prepare ring */
	if ((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) {
		struct cdns3_trb *link_trb;
		int doorbell, dma_index;
		u32 ch_bit = 0;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
		dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

		/* Driver can't update the LINK TRB while it is being processed. */
		if (doorbell && dma_index == priv_ep->num_trbs - 1) {
			priv_ep->flags |= EP_DEFERRED_DRDY;
			return -ENOBUFS;
		}

		/* update Cycle bit in Link TRB before starting DMA */
		link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1);

		/*
		 * For TRs of size 2, enabling TRB_CHAIN for epXin causes
		 * the DMA to get stuck at the LINK TRB.
		 * On the other hand, removing TRB_CHAIN for longer TRs for
		 * epXout causes the DMA to get stuck after handling the LINK
		 * TRB. To eliminate this strange behavior, the driver sets
		 * the TRB_CHAIN bit only for TR size > 2.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC ||
		    TRBS_PER_SEGMENT > 2)
			ch_bit = TRB_CHAIN;

		link_trb->control = ((priv_ep->pcs) ? TRB_CYCLE : 0) |
				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit;
	}

	if (priv_dev->dev_ver <= DEV_VER_V2)
		togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);

	/* set incorrect Cycle Bit for first trb */
	control = priv_ep->pcs ? 0 : TRB_CYCLE;

	do {
		u32 length;
		u16 td_size = 0;

		/* fill TRB */
		control |= TRB_TYPE(TRB_NORMAL);
		trb->buffer = TRB_BUFFER(request->num_sgs == 0
				? trb_dma : request->sg[sg_iter].dma_address);

		if (likely(!request->num_sgs))
			length = request->length;
		else
			length = request->sg[sg_iter].length;

		if (likely(priv_dev->dev_ver >= DEV_VER_V2))
			td_size = DIV_ROUND_UP(length,
					       priv_ep->endpoint.maxpacket);

		trb->length = TRB_BURST_LEN(priv_ep->trb_burst_size) |
			      TRB_LEN(length);
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length |= TRB_TDL_SS_SIZE(td_size);
		else
			control |= TRB_TDL_HS_SIZE(td_size);

		pcs = priv_ep->pcs ? TRB_CYCLE : 0;

		/*
		 * first trb should be prepared as last to avoid processing
		 * the transfer too early
		 */
		if (sg_iter != 0)
			control |= pcs;

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
			control |= TRB_IOC | TRB_ISP;
		} else {
			/* for last element in TD or in SG list */
			if (sg_iter == (num_trb - 1) && sg_iter != 0)
				control |= pcs | TRB_IOC | TRB_ISP;
		}

		if (sg_iter)
			trb->control = control;
		else
			priv_req->trb->control = control;

		control = 0;
		++sg_iter;
		priv_req->end_trb = priv_ep->enqueue;
		cdns3_ep_inc_enq(priv_ep);
		trb = priv_ep->trb_pool + priv_ep->enqueue;
	} while (sg_iter < num_trb);

	trb = priv_req->trb;

	priv_req->flags |= REQUEST_PENDING;

	if (sg_iter == 1)
		trb->control |= TRB_IOC | TRB_ISP;

	/*
	 * Memory barrier - cycle bit must be set before other fields in trb.
	 */
	wmb();

	/* give the TD to the consumer */
	if (togle_pcs)
		trb->control = trb->control ^ 1;

	if (priv_dev->dev_ver <= DEV_VER_V2)
		cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);

	trace_cdns3_prepare_trb(priv_ep, priv_req->trb);

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/*
	 * For DMULT mode we can set address to transfer ring only once after
	 * enabling endpoint.
	 */
	if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) {
		/*
		 * As long as SW is not ready to handle the OUT transfer, the
		 * ISO OUT Endpoint should be disabled (EP_CFG.ENABLE = 0).
		 * EP_CFG_ENABLE must be set before updating ep_traddr.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir &&
		    !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) {
			priv_ep->flags |= EP_QUIRK_ISO_OUT_EN;
			cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
					       EP_CFG_ENABLE);
		}

		writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma +
					priv_req->start_trb * TRB_SIZE),
					&priv_dev->regs->ep_traddr);

		priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR;
	}

	if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) {
		trace_cdns3_ring(priv_ep);
		/* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}

	/* WORKAROUND for transition to L0 */
	__cdns3_gadget_wakeup(priv_dev);

	return 0;
}

void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
{
	struct cdns3_endpoint *priv_ep;
	struct usb_ep *ep;
	int val;

	if (priv_dev->hw_configured_flag)
		return;

	writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf);
	writel(EP_CMD_ERDY | EP_CMD_REQ_CMPL, &priv_dev->regs->ep_cmd);

	cdns3_set_register_bit(&priv_dev->regs->usb_conf,
			       USB_CONF_U1EN | USB_CONF_U2EN);

	/* wait until configuration set */
	readl_poll_timeout_atomic(&priv_dev->regs->usb_sts, val,
				  val & USB_STS_CFGSTS_MASK, 1, 100);

	priv_dev->hw_configured_flag = 1;

	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
		if (ep->enabled) {
			priv_ep = ep_to_cdns3_ep(ep);
			cdns3_start_all_request(priv_dev, priv_ep);
		}
	}
}

/**
 * cdns3_request_handled - check whether request has been handled by DMA
 *
 * @priv_ep: extended endpoint object.
 * @priv_req: request object for checking
 *
 * Endpoint must be selected before invoking this function.
 *
 * Returns false if request has not been handled by DMA, else returns true.
 *
 * SR - start ring
 * ER - end ring
 * DQ = priv_ep->dequeue - dequeue position
 * EQ = priv_ep->enqueue - enqueue position
 * ST = priv_req->start_trb - index of first TRB in transfer ring
 * ET = priv_req->end_trb - index of last TRB in transfer ring
 * CI = current_index - index of the TRB currently processed by DMA.
 *
 * As a first step, the function checks whether the cycle bit for
 * priv_req->start_trb is correct.
 *
 * Some rules:
 * 1. priv_ep->dequeue never exceeds current_index.
 * 2. priv_ep->enqueue never exceeds priv_ep->dequeue.
 * 3. Exception: priv_ep->enqueue == priv_ep->dequeue
 *    and priv_ep->free_trbs is zero.
 *    This case indicates that the TR is full.
 *
 * Recognition can then be split into two cases:
 * Case 1 - priv_ep->dequeue < current_index
 *      SR ... EQ ... DQ ... CI ... ER
 *      SR ... DQ ... CI ... EQ ... ER
 *
 * Request has been handled by DMA if ST and ET are between DQ and CI.
 *
 * Case 2 - priv_ep->dequeue > current_index
 * This situation takes place when CI goes through the LINK TRB at the end of
 * the transfer ring.
 *      SR ... CI ... EQ ... DQ ... ER
 *
 * Request has been handled by DMA if ET is less than CI or
 * ET is greater than or equal to DQ.
 */
static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep,
				  struct cdns3_request *priv_req)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_trb *trb = priv_req->trb;
	int current_index = 0;
	int handled = 0;
	int doorbell;

	current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

	trb = &priv_ep->trb_pool[priv_req->start_trb];

	if ((trb->control & TRB_CYCLE) != priv_ep->ccs)
		goto finish;

	if (doorbell == 1 && current_index == priv_ep->dequeue)
		goto finish;

	/* The corner case for TRBS_PER_SEGMENT equal to 2. */
	if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
		handled = 1;
		goto finish;
	}

	if (priv_ep->enqueue == priv_ep->dequeue &&
	    priv_ep->free_trbs == 0) {
		handled = 1;
	} else if (priv_ep->dequeue < current_index) {
		if ((current_index == (priv_ep->num_trbs - 1)) &&
		    !priv_ep->dequeue)
			goto finish;

		if (priv_req->end_trb >= priv_ep->dequeue &&
		    priv_req->end_trb < current_index)
			handled = 1;
	} else if (priv_ep->dequeue > current_index) {
		if (priv_req->end_trb < current_index ||
		    priv_req->end_trb >= priv_ep->dequeue)
			handled = 1;
	}

finish:
	trace_cdns3_request_handled(priv_req, current_index, handled);

	return handled;
}
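
/*
 * Worked example for Case 1 above: in an 8-entry ring with DQ = 2 and
 * CI = 5, a request occupying TRBs 2..4 (ST = 2, ET = 4) is reported as
 * handled, while a request with ET = 5 is not, because DMA has not yet moved
 * past its last TRB.
 */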

static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	struct cdns3_trb *trb;

	while (!list_empty(&priv_ep->pending_req_list)) {
		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);

		/*
		 * Re-select endpoint. It could have been changed by another
		 * CPU during handling usb_gadget_giveback_request.
		 */
		cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

		if (!cdns3_request_handled(priv_ep, priv_req))
			goto prepare_next_td;

		trb = priv_ep->trb_pool + priv_ep->dequeue;
		trace_cdns3_complete_trb(priv_ep, trb);

		if (trb != priv_req->trb)
			dev_warn(priv_dev->dev,
				 "request_trb=0x%p, queue_trb=0x%p\n",
				 priv_req->trb, trb);

		request->actual = TRB_LEN(le32_to_cpu(trb->length));
		cdns3_move_deq_to_next_trb(priv_req);
		cdns3_gadget_giveback(priv_ep, priv_req, 0);

		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
		    TRBS_PER_SEGMENT == 2)
			break;
	}
	priv_ep->flags &= ~EP_PENDING_REQUEST;

prepare_next_td:
	if (!(priv_ep->flags & EP_STALLED) &&
	    !(priv_ep->flags & EP_STALL_PENDING))
		cdns3_start_all_request(priv_dev, priv_ep);
}

void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	cdns3_wa1_restore_cycle_bit(priv_ep);

	if (rearm) {
		trace_cdns3_ring(priv_ep);

		/* Cycle Bit must be updated before arming DMA. */
		wmb();
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);

		__cdns3_gadget_wakeup(priv_dev);

		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}
}

/**
 * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
 * @priv_ep: endpoint object
 *
 * Returns 0
 */
static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	u32 ep_sts_reg;

	cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

	trace_cdns3_epx_irq(priv_dev, priv_ep);

	ep_sts_reg = readl(&priv_dev->regs->ep_sts);
	writel(ep_sts_reg, &priv_dev->regs->ep_sts);

	if (ep_sts_reg & EP_STS_TRBERR) {
		if (priv_ep->flags & EP_STALL_PENDING &&
		    !(ep_sts_reg & EP_STS_DESCMIS &&
		    priv_dev->dev_ver < DEV_VER_V2)) {
			cdns3_ep_stall_flush(priv_ep);
		}

		/*
		 * For isochronous transfers the driver completes the request
		 * on IOC or on TRBERR. IOC appears only when the device
		 * receives an OUT data packet. If the host disables the
		 * stream or loses some packets, then the only way to finish
		 * all queued transfers is to do it on the TRBERR event.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC &&
		    !priv_ep->wa1_set) {
			if (!priv_ep->dir) {
				u32 ep_cfg = readl(&priv_dev->regs->ep_cfg);

				ep_cfg &= ~EP_CFG_ENABLE;
				writel(ep_cfg, &priv_dev->regs->ep_cfg);
				priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
			}
			cdns3_transfer_completed(priv_dev, priv_ep);
		} else if (!(priv_ep->flags & EP_STALLED) &&
			   !(priv_ep->flags & EP_STALL_PENDING)) {
			if (priv_ep->flags & EP_DEFERRED_DRDY) {
				priv_ep->flags &= ~EP_DEFERRED_DRDY;
				cdns3_start_all_request(priv_dev, priv_ep);
			} else {
				cdns3_rearm_transfer(priv_ep,
						     priv_ep->wa1_set);
			}
		}
	}

	if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP)) {
		if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
			if (ep_sts_reg & EP_STS_ISP)
				priv_ep->flags |= EP_QUIRK_END_TRANSFER;
			else
				priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
		}

		cdns3_transfer_completed(priv_dev, priv_ep);
	}

	/*
	 * WA2: this condition should only be met when
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN.
	 * In other cases this interrupt will be disabled.
	 */
	if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
	    !(priv_ep->flags & EP_STALLED))
		cdns3_wa2_descmissing_packet(priv_ep);

	return 0;
}

static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
{
	if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect) {
		spin_unlock(&priv_dev->lock);
		priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
		spin_lock(&priv_dev->lock);
	}
}

/**
 * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device
 * @priv_dev: extended gadget object
 * @usb_ists: bitmap representation of device's reported interrupts
 * (usb_ists register value)
 */
static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
					      u32 usb_ists)
{
	int speed = 0;

	trace_cdns3_usb_irq(priv_dev, usb_ists);
	if (usb_ists & USB_ISTS_L1ENTI) {
		/*
		 * WORKAROUND: the CDNS3 controller has an issue with hardware
		 * resuming from L1. To fix it, if any DMA transfer is pending,
		 * the driver must start driving the resume signal immediately.
		 */
		if (readl(&priv_dev->regs->drbl))
			__cdns3_gadget_wakeup(priv_dev);
	}

	/* Connection detected */
	if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
		speed = cdns3_get_speed(priv_dev);
		priv_dev->gadget.speed = speed;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED);
		cdns3_ep0_config(priv_dev);
	}

	/* Disconnection detected */
	if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
		cdns3_disconnect_gadget(priv_dev);
		priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
		cdns3_hw_reset_eps_config(priv_dev);
	}

	if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->suspend) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->suspend(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->resume) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->resume(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	/* reset */
	if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) {
		if (priv_dev->gadget_driver) {
			spin_unlock(&priv_dev->lock);
			usb_gadget_udc_reset(&priv_dev->gadget,
					     priv_dev->gadget_driver);
			spin_lock(&priv_dev->lock);

			/* read again to check the actual speed */
			speed = cdns3_get_speed(priv_dev);
			priv_dev->gadget.speed = speed;
			cdns3_hw_reset_eps_config(priv_dev);
			cdns3_ep0_config(priv_dev);
		}
	}
}

/**
 * cdns3_device_irq_handler - interrupt handler for device part of controller
 *
 * @irq: irq number for cdns3 core device
 * @data: structure of cdns3
 *
 * Returns IRQ_HANDLED or IRQ_NONE
 */
static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
{
	struct cdns3_device *priv_dev;
	struct cdns3 *cdns = data;
	irqreturn_t ret = IRQ_NONE;
	u32 reg;

	priv_dev = cdns->gadget_dev;

	/* check USB device interrupt */
	reg = readl(&priv_dev->regs->usb_ists);
	if (reg) {
		/*
		 * After masking interrupts, new interrupts won't be reported
		 * in usb_ists/ep_ists. In order not to lose any of them, the
		 * driver disables only the detected interrupts. They will be
		 * re-enabled as soon as the source of the interrupt is
		 * cleared. This unusual behavior applies only to the usb_ists
		 * register.
		 */
		reg = ~reg & readl(&priv_dev->regs->usb_ien);
		/* mask deferred interrupt. */
		writel(reg, &priv_dev->regs->usb_ien);
		ret = IRQ_WAKE_THREAD;
	}

	/* check endpoint interrupt */
	reg = readl(&priv_dev->regs->ep_ists);
	if (reg) {
		writel(0, &priv_dev->regs->ep_ien);
		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}

/**
 * cdns3_device_thread_irq_handler - interrupt handler for device part
 * of controller
 *
 * @irq: irq number for cdns3 core device
 * @data: structure of cdns3
 *
 * Returns IRQ_HANDLED or IRQ_NONE
 */
static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
{
	struct cdns3_device *priv_dev;
	struct cdns3 *cdns = data;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;
	int bit;
	u32 reg;

	priv_dev = cdns->gadget_dev;
	spin_lock_irqsave(&priv_dev->lock, flags);

	reg = readl(&priv_dev->regs->usb_ists);
	if (reg) {
		writel(reg, &priv_dev->regs->usb_ists);
		writel(USB_IEN_INIT, &priv_dev->regs->usb_ien);
		cdns3_check_usb_interrupt_proceed(priv_dev, reg);
		ret = IRQ_HANDLED;
	}

	reg = readl(&priv_dev->regs->ep_ists);

	/* handle default endpoint OUT */
	if (reg & EP_ISTS_EP_OUT0) {
		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT);
		ret = IRQ_HANDLED;
	}

	/* handle default endpoint IN */
	if (reg & EP_ISTS_EP_IN0) {
		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN);
		ret = IRQ_HANDLED;
	}

	/* check if interrupt from non default endpoint, if not, exit */
	reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0);
	if (!reg)
		goto irqend;

	for_each_set_bit(bit, (unsigned long *)&reg,
			 sizeof(u32) * BITS_PER_BYTE) {
		cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]);
		ret = IRQ_HANDLED;
	}

irqend:
	writel(~0, &priv_dev->regs->ep_ien);
	spin_unlock_irqrestore(&priv_dev->lock, flags);

	return ret;
}
/**
 * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP
 *
 * The real reservation will occur during write to EP_CFG register,
 * this function is used to check if the 'size' reservation is allowed.
 *
 * @priv_dev: extended gadget object
 * @size: the size (KB) the EP would like to allocate
 * @is_in: endpoint direction
 *
 * Return 0 if the required size can be met or negative value on failure
 */
static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
					  int size, int is_in)
{
	int remained;

	/* 2KB are reserved for EP0 */
	remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;

	if (is_in) {
		if (remained < size)
			return -EPERM;

		priv_dev->onchip_used_size += size;
	} else {
		int required;

		/*
		 * All OUT EPs share the same chunk of onchip memory, so
		 * the driver checks if it has already assigned enough buffers.
		 */
		if (priv_dev->out_mem_is_allocated >= size)
			return 0;

		required = size - priv_dev->out_mem_is_allocated;

		if (required > remained)
			return -EPERM;

		priv_dev->out_mem_is_allocated += required;
		priv_dev->onchip_used_size += required;
	}

	return 0;
}
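/**
 * cdns3_configure_dmult - configure DMA transfer mode
 * @priv_dev: extended gadget object
 * @priv_ep: endpoint object, or NULL when configuring the controller globally
 *
 * For controller versions up to DEV_VER_V2 the DMULT mode is a global
 * setting in usb_conf; from DEV_VER_V3 on, the TDL source and DMA
 * behavior are programmed per endpoint via the bit masks below.
 */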
void cdns3_configure_dmult(struct cdns3_device *priv_dev,
			   struct cdns3_endpoint *priv_ep)
{
	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;

	/* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */
	if (priv_dev->dev_ver <= DEV_VER_V2)
		writel(USB_CONF_DMULT, &regs->usb_conf);

	if (priv_dev->dev_ver == DEV_VER_V2)
		writel(USB_CONF2_EN_TDL_TRB, &regs->usb_conf2);

	if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) {
		u32 mask;

		if (priv_ep->dir)
			mask = BIT(priv_ep->num + 16);
		else
			mask = BIT(priv_ep->num);

		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
			cdns3_set_register_bit(&regs->tdl_from_trb, mask);
			cdns3_set_register_bit(&regs->tdl_beh, mask);
			cdns3_set_register_bit(&regs->tdl_beh2, mask);
			cdns3_set_register_bit(&regs->dma_adv_td, mask);
		}

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
			cdns3_set_register_bit(&regs->tdl_from_trb, mask);

		cdns3_set_register_bit(&regs->dtrans, mask);
	}
}
/**
 * cdns3_ep_config - Configure hardware endpoint
 * @priv_ep: extended endpoint object
 */
void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
{
	bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
	u32 max_packet_size = 0;
	u8 maxburst = 0;
	u32 ep_cfg = 0;
	u8 buffering;
	u8 mult = 0;
	int ret;

	buffering = CDNS3_EP_BUF_SIZE - 1;

	cdns3_configure_dmult(priv_dev, priv_ep);

	switch (priv_ep->type) {
	case USB_ENDPOINT_XFER_INT:
		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);

		if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
		    priv_dev->dev_ver > DEV_VER_V2)
			ep_cfg |= EP_CFG_TDL_CHK;
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);

		if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
		    priv_dev->dev_ver > DEV_VER_V2)
			ep_cfg |= EP_CFG_TDL_CHK;
		break;
	default:
		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
		mult = CDNS3_EP_ISO_HS_MULT - 1;
		buffering = mult + 1;
	}

	switch (priv_dev->gadget.speed) {
	case USB_SPEED_FULL:
		max_packet_size = is_iso_ep ? 1023 : 64;
		break;
	case USB_SPEED_HIGH:
		max_packet_size = is_iso_ep ? 1024 : 512;
		break;
	case USB_SPEED_SUPER:
		/* This is a limitation the driver assumes. */
		mult = 0;
		max_packet_size = 1024;
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
			maxburst = CDNS3_EP_ISO_SS_BURST - 1;
			buffering = (mult + 1) * (maxburst + 1);

			if (priv_ep->interval > 1)
				buffering++;
		} else {
			maxburst = CDNS3_EP_BUF_SIZE - 1;
		}
		break;
	default:
		/* all other speeds are not supported */
		return;
	}

	if (max_packet_size == 1024)
		priv_ep->trb_burst_size = 128;
	else if (max_packet_size >= 512)
		priv_ep->trb_burst_size = 64;
	else
		priv_ep->trb_burst_size = 16;

	ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
					     !!priv_ep->dir);
	if (ret) {
		dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
		return;
	}

	ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
		  EP_CFG_MULT(mult) |
		  EP_CFG_BUFFERING(buffering) |
		  EP_CFG_MAXBURST(maxburst);

	cdns3_select_ep(priv_dev, bEndpointAddress);
	writel(ep_cfg, &priv_dev->regs->ep_cfg);

	dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n",
		priv_ep->name, ep_cfg);
}
/* Find correct direction for HW endpoint according to description */
static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc,
				   struct cdns3_endpoint *priv_ep)
{
	return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) ||
	       (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc));
}
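/*
 * Walk the gadget's ep_list and return the first not yet claimed hardware
 * endpoint whose direction capability matches @desc. The endpoint number is
 * parsed out of the "epXin"/"epXout" name and cached in priv_ep->num.
 */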
static struct
cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev,
					struct usb_endpoint_descriptor *desc)
{
	struct usb_ep *ep;
	struct cdns3_endpoint *priv_ep;

	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
		unsigned long num;
		int ret;
		/* ep name pattern is like epXin or epXout */
		char c[2] = {ep->name[2], '\0'};

		ret = kstrtoul(c, 10, &num);
		if (ret)
			return ERR_PTR(ret);

		priv_ep = ep_to_cdns3_ep(ep);
		if (cdns3_ep_dir_is_correct(desc, priv_ep)) {
			if (!(priv_ep->flags & EP_CLAIMED)) {
				priv_ep->num = num;
				return priv_ep;
			}
		}
	}

	return ERR_PTR(-ENOENT);
}
/*
 * Cadence IP has one limitation: all endpoints must be configured
 * (Type & MaxPacketSize) before setting configuration through the hardware
 * register, which means we can't change an endpoint's configuration after
 * set_configuration.
 *
 * This function sets the EP_CLAIMED flag, which is added when the gadget
 * driver uses usb_ep_autoconfig to configure a specific endpoint;
 * when the udc driver receives a set_configuration request,
 * it goes through all claimed endpoints and configures them
 * accordingly.
 *
 * At usb_ep_ops.enable/disable, we only enable and disable the endpoint
 * through the ep_cfg register, which can be changed after set_configuration,
 * and do some software operations accordingly.
 */
static struct
usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
			      struct usb_endpoint_descriptor *desc,
			      struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	struct cdns3_endpoint *priv_ep;
	unsigned long flags;

	priv_ep = cdns3_find_available_ep(priv_dev, desc);
	if (IS_ERR(priv_ep)) {
		dev_err(priv_dev->dev, "no available ep\n");
		return NULL;
	}

	dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name);

	spin_lock_irqsave(&priv_dev->lock, flags);
	priv_ep->endpoint.desc = desc;
	priv_ep->dir = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT;
	priv_ep->type = usb_endpoint_type(desc);
	priv_ep->flags |= EP_CLAIMED;
	priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;

	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return &priv_ep->endpoint;
}
/**
 * cdns3_gadget_ep_alloc_request - Allocates request
 * @ep: endpoint object associated with request
 * @gfp_flags: gfp flags
 *
 * Returns allocated request address, NULL on allocation error
 */
struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep,
						  gfp_t gfp_flags)
{
	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
	struct cdns3_request *priv_req;

	priv_req = kzalloc(sizeof(*priv_req), gfp_flags);
	if (!priv_req)
		return NULL;

	priv_req->priv_ep = priv_ep;

	trace_cdns3_alloc_request(priv_req);
	return &priv_req->request;
}
/**
 * cdns3_gadget_ep_free_request - Free memory occupied by request
 * @ep: endpoint object associated with request
 * @request: request to free memory
 */
void cdns3_gadget_ep_free_request(struct usb_ep *ep,
				  struct usb_request *request)
{
	struct cdns3_request *priv_req = to_cdns3_request(request);

	if (priv_req->aligned_buf)
		priv_req->aligned_buf->in_use = 0;

	trace_cdns3_free_request(priv_req);
	kfree(priv_req);
}
/**
 * cdns3_gadget_ep_enable - Enable endpoint
 * @ep: endpoint object
 * @desc: endpoint descriptor
 *
 * Returns 0 on success, error code elsewhere
 */
static int cdns3_gadget_ep_enable(struct usb_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{
	struct cdns3_endpoint *priv_ep;
	struct cdns3_device *priv_dev;
	u32 reg = EP_STS_EN_TRBERREN;
	u32 bEndpointAddress;
	unsigned long flags;
	int enable = 1;
	int ret;
	int val;

	priv_ep = ep_to_cdns3_ep(ep);
	priv_dev = priv_ep->cdns3_dev;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		dev_dbg(priv_dev->dev, "usbss: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	if (dev_WARN_ONCE(priv_dev->dev, priv_ep->flags & EP_ENABLED,
			  "%s is already enabled\n", priv_ep->name))
		return 0;

	spin_lock_irqsave(&priv_dev->lock, flags);

	priv_ep->endpoint.desc = desc;
	priv_ep->type = usb_endpoint_type(desc);
	priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;

	if (priv_ep->interval > ISO_MAX_INTERVAL &&
	    priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
		dev_err(priv_dev->dev, "Driver is limited to %d period\n",
			ISO_MAX_INTERVAL);
		ret = -EINVAL;
		goto exit;
	}

	ret = cdns3_allocate_trb_pool(priv_ep);
	if (ret)
		goto exit;

	bEndpointAddress = priv_ep->num | priv_ep->dir;
	cdns3_select_ep(priv_dev, bEndpointAddress);

	trace_cdns3_gadget_ep_enable(priv_ep);

	writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);

	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
					!(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
					1, 1000);
	if (unlikely(ret)) {
		cdns3_free_trb_pool(priv_ep);
		ret = -EINVAL;
		goto exit;
	}

	/* enable interrupt for selected endpoint */
	cdns3_set_register_bit(&priv_dev->regs->ep_ien,
			       BIT(cdns3_ep_addr_to_index(bEndpointAddress)));

	if (priv_dev->dev_ver < DEV_VER_V2)
		cdns3_wa2_enable_detection(priv_dev, priv_ep, reg);

	writel(reg, &priv_dev->regs->ep_sts_en);

	/*
	 * For some versions of the controller, at some point during ISO OUT
	 * traffic DMA reads the Transfer Ring of an EP that has never got a
	 * doorbell. This issue was detected only in simulation, but to avoid
	 * it the driver adds protection: it enables an ISO OUT endpoint only
	 * just before setting DRBL. This special treatment of ISO OUT
	 * endpoints is recommended by the controller specification.
	 */
	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
		enable = 0;

	if (enable)
		cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_ENABLE);

	ep->desc = desc;
	priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING |
			    EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN);
	priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR;
	priv_ep->wa1_set = 0;
	priv_ep->enqueue = 0;
	priv_ep->dequeue = 0;
	reg = readl(&priv_dev->regs->ep_sts);
	priv_ep->pcs = !!EP_STS_CCS(reg);
	priv_ep->ccs = !!EP_STS_CCS(reg);
	/* one TRB is reserved for link TRB used in DMULT mode */
	priv_ep->free_trbs = priv_ep->num_trbs - 1;
exit:
	spin_unlock_irqrestore(&priv_dev->lock, flags);

	return ret;
}
/**
 * cdns3_gadget_ep_disable - Disable endpoint
 * @ep: endpoint object
 *
 * Returns 0 on success, error code elsewhere
 */
static int cdns3_gadget_ep_disable(struct usb_ep *ep)
{
	struct cdns3_endpoint *priv_ep;
	struct cdns3_request *priv_req;
	struct cdns3_device *priv_dev;
	struct usb_request *request;
	unsigned long flags;
	int ret = 0;
	u32 ep_cfg;
	int val;

	if (!ep) {
		pr_err("usbss: invalid parameters\n");
		return -EINVAL;
	}

	priv_ep = ep_to_cdns3_ep(ep);
	priv_dev = priv_ep->cdns3_dev;

	if (dev_WARN_ONCE(priv_dev->dev, !(priv_ep->flags & EP_ENABLED),
			  "%s is already disabled\n", priv_ep->name))
		return 0;

	spin_lock_irqsave(&priv_dev->lock, flags);

	trace_cdns3_gadget_ep_disable(priv_ep);

	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);

	ep_cfg = readl(&priv_dev->regs->ep_cfg);
	ep_cfg &= ~EP_CFG_ENABLE;
	writel(ep_cfg, &priv_dev->regs->ep_cfg);

	/*
	 * The driver needs some time before resetting the endpoint: it
	 * waits for the DBUSY bit to clear or for the timeout to expire.
	 * 10us is enough time for the controller to stop the transfer.
	 */
	readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val,
				  !(val & EP_STS_DBUSY), 1, 10);
	writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);

	/* the poll result was previously discarded, so capture it in ret */
	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
					!(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
					1, 1000);
	if (unlikely(ret))
		dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n",
			priv_ep->name);

	while (!list_empty(&priv_ep->pending_req_list)) {
		request = cdns3_next_request(&priv_ep->pending_req_list);

		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
				      -ESHUTDOWN);
	}

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);

		kfree(priv_req->request.buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
					     &priv_req->request);
		list_del_init(&priv_req->list);
		--priv_ep->wa2_counter;
	}

	while (!list_empty(&priv_ep->deferred_req_list)) {
		request = cdns3_next_request(&priv_ep->deferred_req_list);

		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
				      -ESHUTDOWN);
	}

	priv_ep->descmis_req = NULL;

	ep->desc = NULL;
	priv_ep->flags &= ~EP_ENABLED;

	spin_unlock_irqrestore(&priv_dev->lock, flags);

	return ret;
}
/**
 * __cdns3_gadget_ep_queue - Transfer data on endpoint
 * @ep: endpoint object
 * @request: request object
 * @gfp_flags: gfp flags
 *
 * Returns 0 on success, error code elsewhere
 */
static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
				   struct usb_request *request,
				   gfp_t gfp_flags)
{
	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	int ret = 0;

	request->actual = 0;
	request->status = -EINPROGRESS;
	priv_req = to_cdns3_request(request);
	trace_cdns3_ep_queue(priv_req);

	if (priv_dev->dev_ver < DEV_VER_V2) {
		ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep,
						priv_req);

		if (ret == EINPROGRESS)
			return 0;
	}

	ret = cdns3_prepare_aligned_request_buf(priv_req);
	if (ret < 0)
		return ret;

	ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
					    usb_endpoint_dir_in(ep->desc));
	if (ret)
		return ret;

	list_add_tail(&request->list, &priv_ep->deferred_req_list);

	/*
	 * If the hardware endpoint configuration has not been set yet then
	 * just queue the request in the deferred list. The transfer will be
	 * started in cdns3_set_hw_configuration.
	 */
	if (priv_dev->hw_configured_flag && !(priv_ep->flags & EP_STALLED) &&
	    !(priv_ep->flags & EP_STALL_PENDING))
		cdns3_start_all_request(priv_dev, priv_ep);

	return 0;
}
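/*
 * Locking wrapper around __cdns3_gadget_ep_queue(). When the gadget driver
 * requested a zero-length packet (request->zero) and the request length is
 * an exact multiple of maxpacket, a separate ZLP request backed by
 * priv_dev->zlp_buf is queued right behind the data request.
 */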
static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
				 gfp_t gfp_flags)
{
	struct usb_request *zlp_request;
	struct cdns3_endpoint *priv_ep;
	struct cdns3_device *priv_dev;
	unsigned long flags;
	int ret;

	if (!request || !ep)
		return -EINVAL;

	priv_ep = ep_to_cdns3_ep(ep);
	priv_dev = priv_ep->cdns3_dev;

	spin_lock_irqsave(&priv_dev->lock, flags);

	ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags);

	if (ret == 0 && request->zero && request->length &&
	    (request->length % ep->maxpacket == 0)) {
		struct cdns3_request *priv_req;

		zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
		/* guard against allocation failure before dereferencing */
		if (!zlp_request) {
			spin_unlock_irqrestore(&priv_dev->lock, flags);
			return -ENOMEM;
		}

		zlp_request->buf = priv_dev->zlp_buf;
		zlp_request->length = 0;

		priv_req = to_cdns3_request(zlp_request);
		priv_req->flags |= REQUEST_ZLP;

		dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n",
			priv_ep->name);
		ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags);
	}

	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return ret;
}
/**
 * cdns3_gadget_ep_dequeue - Remove request from transfer queue
 * @ep: endpoint object associated with request
 * @request: request object
 *
 * Returns 0 on success, error code elsewhere
 */
int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
			    struct usb_request *request)
{
	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *req, *req_temp;
	struct cdns3_request *priv_req;
	struct cdns3_trb *link_trb;
	unsigned long flags;
	int ret = 0;

	if (!ep || !request || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&priv_dev->lock, flags);

	priv_req = to_cdns3_request(request);

	trace_cdns3_ep_dequeue(priv_req);

	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);

	list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
				 list) {
		if (request == req)
			goto found;
	}

	list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
				 list) {
		if (request == req)
			goto found;
	}

	goto not_found;

found:
	if (priv_ep->wa1_trb == priv_req->trb)
		cdns3_wa1_restore_cycle_bit(priv_ep);

	link_trb = priv_req->trb;
	cdns3_move_deq_to_next_trb(priv_req);
	cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);

	/* Update ring */
	request = cdns3_next_request(&priv_ep->deferred_req_list);
	if (request) {
		priv_req = to_cdns3_request(request);

		link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
					      (priv_req->start_trb * TRB_SIZE));
		link_trb->control = (link_trb->control & TRB_CYCLE) |
				    TRB_TYPE(TRB_LINK) | TRB_CHAIN | TRB_TOGGLE;
	} else {
		priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
	}

not_found:
	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return ret;
}
/**
 * __cdns3_gadget_ep_set_halt - Sets stall on selected endpoint
 * Should be called after acquiring spin_lock and selecting ep
 * @priv_ep: endpoint object to set stall on.
 */
void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	trace_cdns3_halt(priv_ep, 1, 0);

	if (!(priv_ep->flags & EP_STALLED)) {
		u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts);

		if (!(ep_sts_reg & EP_STS_DBUSY))
			cdns3_ep_stall_flush(priv_ep);
		else
			priv_ep->flags |= EP_STALL_PENDING;
	}
}
/**
 * __cdns3_gadget_ep_clear_halt - Clears stall on selected endpoint
 * Should be called after acquiring spin_lock and selecting ep
 * @priv_ep: endpoint object to clear stall on
 */
int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *request;
	int ret;
	int val;

	trace_cdns3_halt(priv_ep, 0, 0);

	writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);

	/* wait for EPRST cleared; capture the poll result in ret */
	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
					!(val & EP_CMD_EPRST), 1, 100);
	if (ret)
		return -EINVAL;

	priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING);

	request = cdns3_next_request(&priv_ep->pending_req_list);
	if (request)
		cdns3_rearm_transfer(priv_ep, 1);

	cdns3_start_all_request(priv_dev, priv_ep);
	return ret;
}
/**
 * cdns3_gadget_ep_set_halt - Sets/clears stall on selected endpoint
 * @ep: endpoint object to set/clear stall on
 * @value: 1 for set stall, 0 for clear stall
 *
 * Returns 0 on success, error code elsewhere
 */
int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	unsigned long flags;
	int ret = 0;

	if (!(priv_ep->flags & EP_ENABLED))
		return -EPERM;

	spin_lock_irqsave(&priv_dev->lock, flags);

	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);

	if (!value) {
		priv_ep->flags &= ~EP_WEDGE;
		ret = __cdns3_gadget_ep_clear_halt(priv_ep);
	} else {
		__cdns3_gadget_ep_set_halt(priv_ep);
	}

	spin_unlock_irqrestore(&priv_dev->lock, flags);

	return ret;
}
extern const struct usb_ep_ops cdns3_gadget_ep0_ops;
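/* Operations for all non-default endpoints; ep0 uses cdns3_gadget_ep0_ops */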
static const struct usb_ep_ops cdns3_gadget_ep_ops = {
	.enable = cdns3_gadget_ep_enable,
	.disable = cdns3_gadget_ep_disable,
	.alloc_request = cdns3_gadget_ep_alloc_request,
	.free_request = cdns3_gadget_ep_free_request,
	.queue = cdns3_gadget_ep_queue,
	.dequeue = cdns3_gadget_ep_dequeue,
	.set_halt = cdns3_gadget_ep_set_halt,
	.set_wedge = cdns3_gadget_ep_set_wedge,
};
/**
 * cdns3_gadget_get_frame - Returns number of actual ITP frame
 * @gadget: gadget object
 *
 * Returns number of actual ITP frame
 */
static int cdns3_gadget_get_frame(struct usb_gadget *gadget)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);

	return readl(&priv_dev->regs->usb_itpn);
}
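/*
 * Request remote wakeup: nothing to do at SuperSpeed and above; for lower
 * speeds, ask the link to go to L0, which starts resume signaling towards
 * the host.
 */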
int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev)
{
	enum usb_device_speed speed;

	speed = cdns3_get_speed(priv_dev);

	if (speed >= USB_SPEED_SUPER)
		return 0;

	/* Start driving resume signaling to indicate remote wakeup. */
	writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf);

	return 0;
}
static int cdns3_gadget_wakeup(struct usb_gadget *gadget)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&priv_dev->lock, flags);
	ret = __cdns3_gadget_wakeup(priv_dev);
	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return ret;
}
static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget,
					int is_selfpowered)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	unsigned long flags;

	spin_lock_irqsave(&priv_dev->lock, flags);
	priv_dev->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return 0;
}
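/* Connect (USB_CONF_DEVEN) or disconnect (USB_CONF_DEVDS) from the bus */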
static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);

	if (is_on)
		writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
	else
		writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);

	return 0;
}
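/**
 * cdns3_gadget_config - initial hardware configuration of the device side
 * @priv_dev: extended gadget object
 *
 * Configures ep0, interrupt enables, version-specific quirks and DMA
 * settings, then connects the device to the bus.
 */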
static void cdns3_gadget_config(struct cdns3_device *priv_dev)
{
	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
	u32 reg;

	cdns3_ep0_config(priv_dev);

	/* enable interrupts for endpoint 0 (in and out) */
	writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, &regs->ep_ien);

	/*
	 * Driver needs to modify LFPS minimal U1 Exit time for DEV_VER_TI_V1
	 * revision of controller.
	 */
	if (priv_dev->dev_ver == DEV_VER_TI_V1) {
		reg = readl(&regs->dbg_link1);

		reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK;
		reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) |
		       DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET;
		writel(reg, &regs->dbg_link1);
	}

	/*
	 * By default some platforms have set protected access to memory.
	 * This causes problems with the cache, so the driver restores
	 * non-secure access to memory.
	 */
	reg = readl(&regs->dma_axi_ctrl);
	reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) |
	       DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE);
	writel(reg, &regs->dma_axi_ctrl);

	/* enable generic interrupt */
	writel(USB_IEN_INIT, &regs->usb_ien);
	writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);

	cdns3_configure_dmult(priv_dev, NULL);

	cdns3_gadget_pullup(&priv_dev->gadget, 1);
}
/**
 * cdns3_gadget_udc_start - Gadget start
 * @gadget: gadget object
 * @driver: driver which operates on this gadget
 *
 * Returns 0 on success, error code elsewhere
 */
static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
				  struct usb_gadget_driver *driver)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	unsigned long flags;

	spin_lock_irqsave(&priv_dev->lock, flags);
	priv_dev->gadget_driver = driver;
	cdns3_gadget_config(priv_dev);
	spin_unlock_irqrestore(&priv_dev->lock, flags);
	return 0;
}
/**
 * cdns3_gadget_udc_stop - Stops gadget
 * @gadget: gadget object
 *
 * Returns 0
 */
static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
{
	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
	struct cdns3_endpoint *priv_ep;
	u32 bEndpointAddress;
	struct usb_ep *ep;
	int ret = 0;
	int val;

	priv_dev->gadget_driver = NULL;

	priv_dev->onchip_used_size = 0;
	priv_dev->out_mem_is_allocated = 0;
	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;

	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
		priv_ep = ep_to_cdns3_ep(ep);
		bEndpointAddress = priv_ep->num | priv_ep->dir;
		cdns3_select_ep(priv_dev, bEndpointAddress);
		writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
		readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
					  !(val & EP_CMD_EPRST), 1, 100);
	}

	/* disable interrupt for device */
	writel(0, &priv_dev->regs->usb_ien);
	writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);

	return ret;
}
static const struct usb_gadget_ops cdns3_gadget_ops = {
	.get_frame = cdns3_gadget_get_frame,
	.wakeup = cdns3_gadget_wakeup,
	.set_selfpowered = cdns3_gadget_set_selfpowered,
	.pullup = cdns3_gadget_pullup,
	.udc_start = cdns3_gadget_udc_start,
	.udc_stop = cdns3_gadget_udc_stop,
	.match_ep = cdns3_gadget_match_ep,
};
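/*
 * Free the TRB pools and endpoint objects allocated by cdns3_init_eps().
 * eps[16] aliases eps[0] (ep0 OUT points at ep0 IN), so it is cleared
 * first to avoid freeing the shared ep0 object twice.
 */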
static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
{
	int i;

	/* ep0 OUT points to ep0 IN. */
	priv_dev->eps[16] = NULL;

	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
		if (priv_dev->eps[i]) {
			cdns3_free_trb_pool(priv_dev->eps[i]);
			devm_kfree(priv_dev->dev, priv_dev->eps[i]);
		}
}
/**
 * cdns3_init_eps - Initializes software endpoints of gadget
 * @priv_dev: extended gadget object
 *
 * Returns 0 on success, error code elsewhere
 */
static int cdns3_init_eps(struct cdns3_device *priv_dev)
{
	u32 ep_enabled_reg, iso_ep_reg;
	struct cdns3_endpoint *priv_ep;
	int ep_dir, ep_number;
	u32 ep_mask;
	int ret = 0;
	int i;

	/* Read it from USB_CAP3 to USB_CAP5 */
	ep_enabled_reg = readl(&priv_dev->regs->usb_cap3);
	iso_ep_reg = readl(&priv_dev->regs->usb_cap4);

	dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n");

	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
		ep_dir = i >> 4;	/* i div 16 */
		ep_number = i & 0xF;	/* i % 16 */
		ep_mask = BIT(i);

		if (!(ep_enabled_reg & ep_mask))
			continue;

		if (ep_dir && !ep_number) {
			priv_dev->eps[i] = priv_dev->eps[0];
			continue;
		}

		priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep),
				       GFP_KERNEL);
		if (!priv_ep) {
			ret = -ENOMEM;
			goto err;
		}

		/* set parent of endpoint object */
		priv_ep->cdns3_dev = priv_dev;
		priv_dev->eps[i] = priv_ep;
		priv_ep->num = ep_number;
		priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT;

		if (!ep_number) {
			ret = cdns3_init_ep0(priv_dev, priv_ep);
			if (ret) {
				dev_err(priv_dev->dev, "Failed to init ep0\n");
				goto err;
			}
		} else {
			snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s",
				 ep_number, ep_dir ? "in" : "out");
			priv_ep->endpoint.name = priv_ep->name;

			usb_ep_set_maxpacket_limit(&priv_ep->endpoint,
						   CDNS3_EP_MAX_PACKET_LIMIT);
			priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS;
			priv_ep->endpoint.ops = &cdns3_gadget_ep_ops;
			if (ep_dir)
				priv_ep->endpoint.caps.dir_in = 1;
			else
				priv_ep->endpoint.caps.dir_out = 1;

			if (iso_ep_reg & ep_mask)
				priv_ep->endpoint.caps.type_iso = 1;

			priv_ep->endpoint.caps.type_bulk = 1;
			priv_ep->endpoint.caps.type_int = 1;

			list_add_tail(&priv_ep->endpoint.ep_list,
				      &priv_dev->gadget.ep_list);
		}

		priv_ep->flags = 0;

		dev_info(priv_dev->dev, "Initialized %s support: %s %s\n",
			 priv_ep->name,
			 priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "",
			 priv_ep->endpoint.caps.type_iso ? "ISO" : "");

		INIT_LIST_HEAD(&priv_ep->pending_req_list);
		INIT_LIST_HEAD(&priv_ep->deferred_req_list);
		INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list);
	}

	return 0;
err:
	cdns3_free_all_eps(priv_dev);
	/* preserve the original error code instead of forcing -ENOMEM */
	return ret;
}
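/**
 * cdns3_gadget_exit - tear down the device side of the controller
 * @cdns: cdns3 instance
 *
 * Releases the IRQ, unregisters the UDC, frees all endpoints and the
 * DMA/bounce buffers allocated by cdns3_gadget_start().
 */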
void cdns3_gadget_exit(struct cdns3 *cdns)
{
	struct cdns3_device *priv_dev;

	priv_dev = cdns->gadget_dev;

	devm_free_irq(cdns->dev, cdns->dev_irq, cdns);

	pm_runtime_mark_last_busy(cdns->dev);
	pm_runtime_put_autosuspend(cdns->dev);

	usb_del_gadget_udc(&priv_dev->gadget);

	cdns3_free_all_eps(priv_dev);

	while (!list_empty(&priv_dev->aligned_buf_list)) {
		struct cdns3_aligned_buf *buf;

		buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list);
		dma_free_coherent(priv_dev->sysdev, buf->size,
				  buf->buf,
				  buf->dma);

		list_del(&buf->list);
		kfree(buf);
	}

	dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
			  priv_dev->setup_dma);

	kfree(priv_dev->zlp_buf);
	kfree(priv_dev);
	cdns->gadget_dev = NULL;
	cdns3_drd_switch_gadget(cdns, 0);
}
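/**
 * cdns3_gadget_start - allocate and initialize the device-side state
 * @cdns: cdns3 instance
 *
 * Allocates the cdns3_device object, determines the on-chip buffer size
 * and maximum speed, initializes endpoints and support buffers, and
 * registers the UDC.
 *
 * Returns 0 on success, error code elsewhere
 */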
static int cdns3_gadget_start(struct cdns3 *cdns)
{
	struct cdns3_device *priv_dev;
	u32 max_speed;
	int ret;

	priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL);
	if (!priv_dev)
		return -ENOMEM;

	cdns->gadget_dev = priv_dev;
	priv_dev->sysdev = cdns->dev;
	priv_dev->dev = cdns->dev;
	priv_dev->regs = cdns->dev_regs;

	device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size",
				 &priv_dev->onchip_buffers);

	if (priv_dev->onchip_buffers <= 0) {
		u32 reg = readl(&priv_dev->regs->usb_cap2);

		priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg);
	}

	if (!priv_dev->onchip_buffers)
		priv_dev->onchip_buffers = 256;

	max_speed = usb_get_maximum_speed(cdns->dev);

	/* Check the maximum_speed parameter */
	switch (max_speed) {
	case USB_SPEED_FULL:
		writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
		break;
	case USB_SPEED_HIGH:
		writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
		break;
	case USB_SPEED_SUPER:
		break;
	default:
		dev_err(cdns->dev, "invalid maximum_speed parameter %d\n",
			max_speed);
		/* fall through */
	case USB_SPEED_UNKNOWN:
		/* default to superspeed */
		max_speed = USB_SPEED_SUPER;
		break;
	}

	/* fill gadget fields */
	priv_dev->gadget.max_speed = max_speed;
	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
	priv_dev->gadget.ops = &cdns3_gadget_ops;
	priv_dev->gadget.name = "usb-ss-gadget";
	priv_dev->gadget.sg_supported = 1;
	priv_dev->gadget.quirk_avoids_skb_reserve = 1;

	spin_lock_init(&priv_dev->lock);
	INIT_WORK(&priv_dev->pending_status_wq,
		  cdns3_pending_setup_status_handler);

	INIT_WORK(&priv_dev->aligned_buf_wq,
		  cdns3_free_aligned_request_buf);

	/* initialize endpoint container */
	INIT_LIST_HEAD(&priv_dev->gadget.ep_list);
	INIT_LIST_HEAD(&priv_dev->aligned_buf_list);

	ret = cdns3_init_eps(priv_dev);
	if (ret) {
		dev_err(priv_dev->dev, "Failed to create endpoints\n");
		goto err1;
	}

	/* allocate memory for setup packet buffer */
	priv_dev->setup_buf = dma_alloc_coherent(priv_dev->sysdev, 8,
						 &priv_dev->setup_dma, GFP_DMA);
	if (!priv_dev->setup_buf) {
		ret = -ENOMEM;
		goto err2;
	}

	priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6);

	dev_dbg(priv_dev->dev, "Device Controller version: %08x\n",
		readl(&priv_dev->regs->usb_cap6));
	dev_dbg(priv_dev->dev, "USB Capabilities: %08x\n",
		readl(&priv_dev->regs->usb_cap1));
	dev_dbg(priv_dev->dev, "On-Chip memory configuration: %08x\n",
		readl(&priv_dev->regs->usb_cap2));

	priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver);

	priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL);
	if (!priv_dev->zlp_buf) {
		ret = -ENOMEM;
		goto err3;
	}

	/* add USB gadget device */
	ret = usb_add_gadget_udc(priv_dev->dev, &priv_dev->gadget);
	if (ret < 0) {
		dev_err(priv_dev->dev,
			"Failed to register USB device controller\n");
		goto err4;
	}

	return 0;
err4:
	kfree(priv_dev->zlp_buf);
err3:
	dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
			  priv_dev->setup_dma);
err2:
	cdns3_free_all_eps(priv_dev);
err1:
	cdns->gadget_dev = NULL;
	return ret;
}
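/**
 * __cdns3_gadget_init - start the gadget role and wire up the IRQ handlers
 * @cdns: cdns3 instance
 *
 * Returns 0 on success, error code elsewhere
 */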
static int __cdns3_gadget_init(struct cdns3 *cdns)
{
	struct cdns3_device *priv_dev;
	int ret = 0;

	cdns3_drd_switch_gadget(cdns, 1);
	pm_runtime_get_sync(cdns->dev);

	ret = cdns3_gadget_start(cdns);
	if (ret)
		return ret;

	priv_dev = cdns->gadget_dev;

	/*
	 * Because the interrupt line can be shared with other components in
	 * the driver, it can't use the IRQF_ONESHOT flag here.
	 */
	ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq,
					cdns3_device_irq_handler,
					cdns3_device_thread_irq_handler,
					IRQF_SHARED, dev_name(cdns->dev), cdns);
	if (ret)
		goto err0;

	return 0;
err0:
	cdns3_gadget_exit(cdns);
	return ret;
}
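/*
 * Role-driver suspend callback: disconnect from the host, reset the
 * endpoint configuration and mask device interrupts before power down.
 */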
static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup)
{
	struct cdns3_device *priv_dev = cdns->gadget_dev;

	cdns3_disconnect_gadget(priv_dev);

	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
	usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
	cdns3_hw_reset_eps_config(priv_dev);

	/* disable interrupt for device */
	writel(0, &priv_dev->regs->usb_ien);

	cdns3_gadget_pullup(&priv_dev->gadget, 0);

	return 0;
}
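/*
 * Role-driver resume callback: if a gadget driver is still bound,
 * reapply the hardware configuration lost over suspend.
 */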
static int cdns3_gadget_resume(struct cdns3 *cdns, bool hibernated)
{
	struct cdns3_device *priv_dev = cdns->gadget_dev;

	if (!priv_dev->gadget_driver)
		return 0;

	cdns3_gadget_config(priv_dev);

	return 0;
}
/**
 * cdns3_gadget_init - initialize device structure
 *
 * @cdns: cdns3 instance
 *
 * This function initializes the gadget.
 */
int cdns3_gadget_init(struct cdns3 *cdns)
{
	struct cdns3_role_driver *rdrv;

	rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
	if (!rdrv)
		return -ENOMEM;

	rdrv->start = __cdns3_gadget_init;
	rdrv->stop = cdns3_gadget_exit;
	rdrv->suspend = cdns3_gadget_suspend;
	rdrv->resume = cdns3_gadget_resume;
	rdrv->state = CDNS3_ROLE_STATE_INACTIVE;
	rdrv->name = "gadget";
	cdns->roles[USB_ROLE_DEVICE] = rdrv;

	return 0;
}