/*
 * USB Gadget driver for LPC32xx
 *
 * Authors:
 *    Kevin Wells <kevin.wells@nxp.com>
 *    Mike James
 *    Roland Stigge <stigge@antcom.de>
 *
 * Copyright (C) 2006 Philips Semiconductors
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2012 Roland Stigge
 *
 * Note: This driver is based on original work done by Mike James for
 *       the LPC3180.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/clk.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/i2c.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/usb/isp1301.h>

#include <asm/byteorder.h>
#include <mach/hardware.h>
#include <linux/io.h>
#include <asm/irq.h>

#include <mach/platform.h>
#include <mach/irqs.h>
#include <mach/board.h>

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif

/*
 * USB device configuration structure
 */
typedef void (*usc_chg_event)(int);
struct lpc32xx_usbd_cfg {
	int vbus_drv_pol; /* 0=active low drive for VBUS via ISP1301 */
	usc_chg_event conn_chgb; /* Connection change event (optional) */
	usc_chg_event susp_chgb; /* Suspend/resume event (optional) */
	usc_chg_event rmwk_chgb; /* Enable/disable remote wakeup */
};

/*
 * controller driver data structures
 */

/* 16 endpoints (not to be confused with 32 hardware endpoints) */
#define	NUM_ENDPOINTS	16

/*
 * IRQ indices make reading the code a little easier
 */
#define IRQ_USB_LP	0
#define IRQ_USB_HP	1
#define IRQ_USB_DEVDMA	2
#define IRQ_USB_ATX	3

#define EP_OUT 0 /* RX (from host) */
#define EP_IN 1 /* TX (to host) */

/* Returns the interrupt mask for the selected hardware endpoint */
#define	EP_MASK_SEL(ep, dir)	(1 << (((ep) * 2) + dir))
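
/*
 * Each of the 16 logical endpoints above is backed by a pair of hardware
 * endpoints: 2n for the OUT (RX) direction and 2n + 1 for the IN (TX)
 * direction (see also USBD_RX_EP_SEL()/USBD_TX_EP_SEL() below). For example,
 * EP_MASK_SEL(1, EP_OUT) evaluates to (1 << 2) and EP_MASK_SEL(1, EP_IN) to
 * (1 << 3), selecting hardware endpoints 2 and 3.
 */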

#define	EP_INT_TYPE	0
#define	EP_ISO_TYPE	1
#define	EP_BLK_TYPE	2
#define	EP_CTL_TYPE	3

/* EP0 states */
#define WAIT_FOR_SETUP	0 /* Wait for setup packet */
#define DATA_IN		1 /* Expect dev->host transfer */
#define DATA_OUT	2 /* Expect host->dev transfer */

/* DD (DMA Descriptor) structure, requires word alignment, this is already
 * defined in the LPC32XX USB device header file, but this version is slightly
 * modified to tag some work data with each DMA descriptor. */
struct lpc32xx_usbd_dd_gad {
	u32 dd_next_phy;
	u32 dd_setup;
	u32 dd_buffer_addr;
	u32 dd_status;
	u32 dd_iso_ps_mem_addr;
	u32 this_dma;
	u32 iso_status[6]; /* 5 spare */
	u32 dd_next_v;
};
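
/*
 * Note: this_dma records the bus address returned by dma_pool_alloc() so the
 * descriptor can be installed into the UDCA (see udc_ep_in_req_dma() and
 * udc_ep_out_req_dma() below) and handed back to dma_pool_free() in
 * udc_dd_free().
 */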

/*
 * Logical endpoint structure
 */
struct lpc32xx_ep {
	struct usb_ep ep;
	struct list_head queue;
	struct lpc32xx_udc *udc;

	u32 hwep_num_base; /* Physical hardware EP */
	u32 hwep_num; /* Maps to hardware endpoint */
	u32 maxpacket;
	u32 lep;

	bool is_in;
	bool req_pending;
	u32 eptype;

	u32 totalints;

	bool wedge;
};

/*
 * Common UDC structure
 */
struct lpc32xx_udc {
	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;
	struct platform_device *pdev;
	struct device *dev;
	struct dentry *pde;
	spinlock_t lock;
	struct i2c_client *isp1301_i2c_client;

	/* Board and device specific */
	struct lpc32xx_usbd_cfg *board;
	u32 io_p_start;
	u32 io_p_size;
	void __iomem *udp_baseaddr;
	int udp_irq[4];
	struct clk *usb_pll_clk;
	struct clk *usb_slv_clk;
	struct clk *usb_otg_clk;

	/* DMA support */
	u32 *udca_v_base;
	u32 udca_p_base;
	struct dma_pool *dd_cache;

	/* Common EP and control data */
	u32 enabled_devints;
	u32 enabled_hwepints;
	u32 dev_status;
	u32 realized_eps;

	/* VBUS detection, pullup, and power flags */
	u8 vbus;
	u8 last_vbus;
	int pullup;
	int poweron;

	/* Work queues related to I2C support */
	struct work_struct pullup_job;
	struct work_struct vbus_job;
	struct work_struct power_job;

	/* USB device peripheral - various */
	struct lpc32xx_ep ep[NUM_ENDPOINTS];
	bool enabled;
	bool clocked;
	bool suspended;
	int ep0state;
	atomic_t enabled_ep_cnt;
	wait_queue_head_t ep_disable_wait_queue;
};

/*
 * Endpoint request
 */
struct lpc32xx_request {
	struct usb_request req;
	struct list_head queue;
	struct lpc32xx_usbd_dd_gad *dd_desc_ptr;
	bool mapped;
	bool send_zlp;
};

static inline struct lpc32xx_udc *to_udc(struct usb_gadget *g)
{
	return container_of(g, struct lpc32xx_udc, gadget);
}

#define ep_dbg(epp, fmt, arg...) \
	dev_dbg(epp->udc->dev, "%s: " fmt, __func__, ## arg)
#define ep_err(epp, fmt, arg...) \
	dev_err(epp->udc->dev, "%s: " fmt, __func__, ## arg)
#define ep_info(epp, fmt, arg...) \
	dev_info(epp->udc->dev, "%s: " fmt, __func__, ## arg)
#define ep_warn(epp, fmt, arg...) \
	dev_warn(epp->udc->dev, "%s: " fmt, __func__, ## arg)

#define UDCA_BUFF_SIZE	(128)

/* TODO: When the clock framework is introduced in LPC32xx, IO_ADDRESS will
 * be replaced with an ioremap()ed pointer
 */
#define USB_CTRL	IO_ADDRESS(LPC32XX_CLK_PM_BASE + 0x64)

/* USB_CTRL bit defines */
#define USB_SLAVE_HCLK_EN	(1 << 24)
#define USB_HOST_NEED_CLK_EN	(1 << 21)
#define USB_DEV_NEED_CLK_EN	(1 << 22)
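
/*
 * USB_CTRL is a shared clock/power control register; the device-related bit
 * is set and cleared with read-modify-write sequences, for example:
 *
 *	writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN, USB_CTRL);
 *
 * as done in isp1301_udc_configure() and udc_clk_set() below.
 */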

/**********************************************************************
 * USB device controller register offsets
 **********************************************************************/

#define USBD_DEVINTST(x)	((x) + 0x200)
#define USBD_DEVINTEN(x)	((x) + 0x204)
#define USBD_DEVINTCLR(x)	((x) + 0x208)
#define USBD_DEVINTSET(x)	((x) + 0x20C)
#define USBD_CMDCODE(x)		((x) + 0x210)
#define USBD_CMDDATA(x)		((x) + 0x214)
#define USBD_RXDATA(x)		((x) + 0x218)
#define USBD_TXDATA(x)		((x) + 0x21C)
#define USBD_RXPLEN(x)		((x) + 0x220)
#define USBD_TXPLEN(x)		((x) + 0x224)
#define USBD_CTRL(x)		((x) + 0x228)
#define USBD_DEVINTPRI(x)	((x) + 0x22C)
#define USBD_EPINTST(x)		((x) + 0x230)
#define USBD_EPINTEN(x)		((x) + 0x234)
#define USBD_EPINTCLR(x)	((x) + 0x238)
#define USBD_EPINTSET(x)	((x) + 0x23C)
#define USBD_EPINTPRI(x)	((x) + 0x240)
#define USBD_REEP(x)		((x) + 0x244)
#define USBD_EPIND(x)		((x) + 0x248)
#define USBD_EPMAXPSIZE(x)	((x) + 0x24C)
/* DMA support registers only below */
/* Set, clear, or get enabled state of the DMA request status. If
 * enabled, an IN or OUT token will start a DMA transfer for the EP */
#define USBD_DMARST(x)		((x) + 0x250)
#define USBD_DMARCLR(x)		((x) + 0x254)
#define USBD_DMARSET(x)		((x) + 0x258)
/* DMA UDCA head pointer */
#define USBD_UDCAH(x)		((x) + 0x280)
/* EP DMA status, enable, and disable. This is used to specifically
 * enable or disable DMA for a specific EP */
#define USBD_EPDMAST(x)		((x) + 0x284)
#define USBD_EPDMAEN(x)		((x) + 0x288)
#define USBD_EPDMADIS(x)	((x) + 0x28C)
/* DMA master interrupts enable and pending interrupts */
#define USBD_DMAINTST(x)	((x) + 0x290)
#define USBD_DMAINTEN(x)	((x) + 0x294)
/* DMA end of transfer interrupt enable, disable, status */
#define USBD_EOTINTST(x)	((x) + 0x2A0)
#define USBD_EOTINTCLR(x)	((x) + 0x2A4)
#define USBD_EOTINTSET(x)	((x) + 0x2A8)
/* New DD request interrupt enable, disable, status */
#define USBD_NDDRTINTST(x)	((x) + 0x2AC)
#define USBD_NDDRTINTCLR(x)	((x) + 0x2B0)
#define USBD_NDDRTINTSET(x)	((x) + 0x2B4)
/* DMA error interrupt enable, disable, status */
#define USBD_SYSERRTINTST(x)	((x) + 0x2B8)
#define USBD_SYSERRTINTCLR(x)	((x) + 0x2BC)
#define USBD_SYSERRTINTSET(x)	((x) + 0x2C0)

/**********************************************************************
 * USBD_DEVINTST/USBD_DEVINTEN/USBD_DEVINTCLR/USBD_DEVINTSET/
 * USBD_DEVINTPRI register definitions
 **********************************************************************/
#define USBD_ERR_INT		(1 << 9)
#define USBD_EP_RLZED		(1 << 8)
#define USBD_TXENDPKT		(1 << 7)
#define USBD_RXENDPKT		(1 << 6)
#define USBD_CDFULL		(1 << 5)
#define USBD_CCEMPTY		(1 << 4)
#define USBD_DEV_STAT		(1 << 3)
#define USBD_EP_SLOW		(1 << 2)
#define USBD_EP_FAST		(1 << 1)
#define USBD_FRAME		(1 << 0)

/**********************************************************************
 * USBD_EPINTST/USBD_EPINTEN/USBD_EPINTCLR/USBD_EPINTSET/
 * USBD_EPINTPRI register definitions
 **********************************************************************/
/* End point selection macro (RX) */
#define USBD_RX_EP_SEL(e)	(1 << ((e) << 1))

/* End point selection macro (TX) */
#define USBD_TX_EP_SEL(e)	(1 << (((e) << 1) + 1))

/**********************************************************************
 * USBD_REEP/USBD_DMARST/USBD_DMARCLR/USBD_DMARSET/USBD_EPDMAST/
 * USBD_EPDMAEN/USBD_EPDMADIS/
 * USBD_NDDRTINTST/USBD_NDDRTINTCLR/USBD_NDDRTINTSET/
 * USBD_EOTINTST/USBD_EOTINTCLR/USBD_EOTINTSET/
 * USBD_SYSERRTINTST/USBD_SYSERRTINTCLR/USBD_SYSERRTINTSET
 * register definitions
 **********************************************************************/
/* Endpoint selection macro */
#define USBD_EP_SEL(e)		(1 << (e))

/**********************************************************************
 * USBD_DMAINTST/USBD_DMAINTEN
 **********************************************************************/
#define USBD_SYS_ERR_INT	(1 << 2)
#define USBD_NEW_DD_INT		(1 << 1)
#define USBD_EOT_INT		(1 << 0)

/**********************************************************************
 * USBD_RXPLEN register definitions
 **********************************************************************/
#define USBD_PKT_RDY		(1 << 11)
#define USBD_DV			(1 << 10)
#define USBD_PK_LEN_MASK	0x3FF

/**********************************************************************
 * USBD_CTRL register definitions
 **********************************************************************/
#define USBD_LOG_ENDPOINT(e)	((e) << 2)
#define USBD_WR_EN		(1 << 1)
#define USBD_RD_EN		(1 << 0)

/**********************************************************************
 * USBD_CMDCODE register definitions
 **********************************************************************/
#define USBD_CMD_CODE(c)	((c) << 16)
#define USBD_CMD_PHASE(p)	((p) << 8)

/**********************************************************************
 * USBD_DMARST/USBD_DMARCLR/USBD_DMARSET register definitions
 **********************************************************************/
#define USBD_DMAEP(e)		(1 << (e))

/* DD (DMA Descriptor) structure, requires word alignment */
struct lpc32xx_usbd_dd {
	u32 *dd_next;
	u32 dd_setup;
	u32 dd_buffer_addr;
	u32 dd_status;
	u32 dd_iso_ps_mem_addr;
};

/* dd_setup bit defines */
#define DD_SETUP_ATLE_DMA_MODE	0x01
#define DD_SETUP_NEXT_DD_VALID	0x04
#define DD_SETUP_ISO_EP		0x10
#define DD_SETUP_PACKETLEN(n)	(((n) & 0x7FF) << 5)
#define DD_SETUP_DMALENBYTES(n)	(((n) & 0xFFFF) << 16)

/* dd_status bit defines */
#define DD_STATUS_DD_RETIRED	0x01
#define DD_STATUS_STS_MASK	0x1E
#define DD_STATUS_STS_NS	0x00 /* Not serviced */
#define DD_STATUS_STS_BS	0x02 /* Being serviced */
#define DD_STATUS_STS_NC	0x04 /* Normal completion */
#define DD_STATUS_STS_DUR	0x06 /* Data underrun (short packet) */
#define DD_STATUS_STS_DOR	0x08 /* Data overrun */
#define DD_STATUS_STS_SE	0x12 /* System error */
#define DD_STATUS_PKT_VAL	0x20 /* Packet valid */
#define DD_STATUS_LSB_EX	0x40 /* LS byte extracted (ATLE) */
#define DD_STATUS_MSB_EX	0x80 /* MS byte extracted (ATLE) */
#define DD_STATUS_MLEN(n)	(((n) >> 8) & 0x3F)
#define DD_STATUS_CURDMACNT(n)	(((n) >> 16) & 0xFFFF)
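
/*
 * Illustrative example (not taken verbatim from this driver): a dd_setup
 * word for a 512-byte bulk transfer on a 64-byte endpoint would be built as
 *
 *	DD_SETUP_PACKETLEN(64) | DD_SETUP_DMALENBYTES(512)
 *
 * i.e. the max packet size in bits 5..15 and the DMA length in bits 16..31.
 */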

/*
 *
 * Protocol engine bits below
 *
 */
/* Device Interrupt Bit Definitions */
#define FRAME_INT		0x00000001
#define EP_FAST_INT		0x00000002
#define EP_SLOW_INT		0x00000004
#define DEV_STAT_INT		0x00000008
#define CCEMTY_INT		0x00000010
#define CDFULL_INT		0x00000020
#define RxENDPKT_INT		0x00000040
#define TxENDPKT_INT		0x00000080
#define EP_RLZED_INT		0x00000100
#define ERR_INT			0x00000200

/* Rx & Tx Packet Length Definitions */
#define PKT_LNGTH_MASK		0x000003FF
#define PKT_DV			0x00000400
#define PKT_RDY			0x00000800

/* USB Control Definitions */
#define CTRL_RD_EN		0x00000001
#define CTRL_WR_EN		0x00000002

/* Command Codes */
#define CMD_SET_ADDR		0x00D00500
#define CMD_CFG_DEV		0x00D80500
#define CMD_SET_MODE		0x00F30500
#define CMD_RD_FRAME		0x00F50500
#define DAT_RD_FRAME		0x00F50200
#define CMD_RD_TEST		0x00FD0500
#define DAT_RD_TEST		0x00FD0200
#define CMD_SET_DEV_STAT	0x00FE0500
#define CMD_GET_DEV_STAT	0x00FE0500
#define DAT_GET_DEV_STAT	0x00FE0200
#define CMD_GET_ERR_CODE	0x00FF0500
#define DAT_GET_ERR_CODE	0x00FF0200
#define CMD_RD_ERR_STAT		0x00FB0500
#define DAT_RD_ERR_STAT		0x00FB0200
#define DAT_WR_BYTE(x)		(0x00000100 | ((x) << 16))
#define CMD_SEL_EP(x)		(0x00000500 | ((x) << 16))
#define DAT_SEL_EP(x)		(0x00000200 | ((x) << 16))
#define CMD_SEL_EP_CLRI(x)	(0x00400500 | ((x) << 16))
#define DAT_SEL_EP_CLRI(x)	(0x00400200 | ((x) << 16))
#define CMD_SET_EP_STAT(x)	(0x00400500 | ((x) << 16))
#define CMD_CLR_BUF		0x00F20500
#define DAT_CLR_BUF		0x00F20200
#define CMD_VALID_BUF		0x00FA0500
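
/*
 * Each of the command words above follows the USBD_CMDCODE layout defined
 * earlier: the command/data byte in bits 23..16 and the phase in bits 15..8
 * (0x05 for a command write, 0x02 for a data read, 0x01 for a data write).
 * For example, CMD_SET_ADDR (0x00D00500) is
 * USBD_CMD_CODE(0xD0) | USBD_CMD_PHASE(0x05), and DAT_WR_BYTE(x) places the
 * byte to be written in bits 23..16 together with the data-write phase.
 */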

/* Device Address Register Definitions */
#define DEV_ADDR_MASK		0x7F
#define DEV_EN			0x80

/* Device Configure Register Definitions */
#define CONF_DVICE		0x01

/* Device Mode Register Definitions */
#define AP_CLK			0x01
#define INAK_CI			0x02
#define INAK_CO			0x04
#define INAK_II			0x08
#define INAK_IO			0x10
#define INAK_BI			0x20
#define INAK_BO			0x40

/* Device Status Register Definitions */
#define DEV_CON			0x01
#define DEV_CON_CH		0x02
#define DEV_SUS			0x04
#define DEV_SUS_CH		0x08
#define DEV_RST			0x10

/* Error Code Register Definitions */
#define ERR_EC_MASK		0x0F
#define ERR_EA			0x10

/* Error Status Register Definitions */
#define ERR_PID			0x01
#define ERR_UEPKT		0x02
#define ERR_DCRC		0x04
#define ERR_TIMOUT		0x08
#define ERR_EOP			0x10
#define ERR_B_OVRN		0x20
#define ERR_BTSTF		0x40
#define ERR_TGL			0x80

/* Endpoint Select Register Definitions */
#define EP_SEL_F		0x01
#define EP_SEL_ST		0x02
#define EP_SEL_STP		0x04
#define EP_SEL_PO		0x08
#define EP_SEL_EPN		0x10
#define EP_SEL_B_1_FULL		0x20
#define EP_SEL_B_2_FULL		0x40

/* Endpoint Status Register Definitions */
#define EP_STAT_ST		0x01
#define EP_STAT_DA		0x20
#define EP_STAT_RF_MO		0x40
#define EP_STAT_CND_ST		0x80

/* Clear Buffer Register Definitions */
#define CLR_BUF_PO		0x01

/* DMA Interrupt Bit Definitions */
#define EOT_INT			0x01
#define NDD_REQ_INT		0x02
#define SYS_ERR_INT		0x04

#define DRIVER_VERSION	"1.03"
static const char driver_name[] = "lpc32xx_udc";

/*
 *
 * proc interface support
 *
 */
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static char *epnames[] = {"INT", "ISO", "BULK", "CTRL"};
static const char debug_filename[] = "driver/udc";

static void proc_ep_show(struct seq_file *s, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;

	seq_printf(s, "\n");
	seq_printf(s, "%12s, maxpacket %4d %3s",
		   ep->ep.name, ep->ep.maxpacket,
		   ep->is_in ? "in" : "out");
	seq_printf(s, " type %4s", epnames[ep->eptype]);
	seq_printf(s, " ints: %12d", ep->totalints);

	if (list_empty(&ep->queue))
		seq_printf(s, "\t(queue empty)\n");
	else {
		list_for_each_entry(req, &ep->queue, queue) {
			u32 length = req->req.actual;

			seq_printf(s, "\treq %p len %d/%d buf %p\n",
				   &req->req, length,
				   req->req.length, req->req.buf);
		}
	}
}

static int proc_udc_show(struct seq_file *s, void *unused)
{
	struct lpc32xx_udc *udc = s->private;
	struct lpc32xx_ep *ep;
	unsigned long flags;

	seq_printf(s, "%s: version %s\n", driver_name, DRIVER_VERSION);

	spin_lock_irqsave(&udc->lock, flags);

	seq_printf(s, "vbus %s, pullup %s, %s powered%s, gadget %s\n\n",
		   udc->vbus ? "present" : "off",
		   udc->enabled ? (udc->vbus ? "active" : "enabled") :
		   "disabled",
		   udc->gadget.is_selfpowered ? "self" : "VBUS",
		   udc->suspended ? ", suspended" : "",
		   udc->driver ? udc->driver->driver.name : "(none)");

	if (udc->enabled && udc->vbus) {
		proc_ep_show(s, &udc->ep[0]);
		list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list)
			proc_ep_show(s, ep);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static int proc_udc_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_udc_show, PDE_DATA(inode));
}

static const struct file_operations proc_ops = {
	.owner		= THIS_MODULE,
	.open		= proc_udc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void create_debug_file(struct lpc32xx_udc *udc)
{
	udc->pde = debugfs_create_file(debug_filename, 0, NULL, udc, &proc_ops);
}

static void remove_debug_file(struct lpc32xx_udc *udc)
{
	debugfs_remove(udc->pde);
}

#else
static inline void create_debug_file(struct lpc32xx_udc *udc) {}
static inline void remove_debug_file(struct lpc32xx_udc *udc) {}
#endif

/* Primary initialization sequence for the ISP1301 transceiver */
static void isp1301_udc_configure(struct lpc32xx_udc *udc)
{
	/* LPC32XX only supports DAT_SE0 USB mode */
	/* This sequence is important */

	/* Disable transparent UART mode first */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
		MC1_UART_EN);

	/* Set full speed and SE0 mode */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_MODE_CONTROL_1, (MC1_SPEED_REG | MC1_DAT_SE0));

	/*
	 * The PSW_OE enable bit state is reversed in the ISP1301 User's Guide
	 */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_MODE_CONTROL_2, (MC2_BI_DI | MC2_SPD_SUSP_CTRL));

	/* Drive VBUS_DRV high or low depending on board setup */
	if (udc->board->vbus_drv_pol != 0)
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DRV);
	else
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
			OTG1_VBUS_DRV);

	/* Bi-directional mode with suspend control
	 * Enable both pulldowns for now - the pullup will be enabled when VBUS
	 * is detected */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_OTG_CONTROL_1,
		(0 | OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN));

	/* Discharge VBUS (just in case) */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
	msleep(1);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
		OTG1_VBUS_DISCHRG);

	/* Clear and enable VBUS high edge interrupt */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_LATCH | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_FALLING, INT_VBUS_VLD);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_RISING, INT_VBUS_VLD);

	/* Enable usb_need_clk clock after transceiver is initialized */
	writel((readl(USB_CTRL) | USB_DEV_NEED_CLK_EN), USB_CTRL);

	dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n",
		 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x00));
	dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n",
		 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x02));
	dev_info(udc->dev, "ISP1301 Version ID : 0x%04x\n",
		 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x14));
}
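
/*
 * Note on the writes above: the ISP1301 exposes a set/clear pair for each
 * control register. Writing to the base address sets the given bits, while
 * writing to (address | ISP1301_I2C_REG_CLEAR_ADDR) clears them, so a write
 * of ~0 to the clear address resets a register before the wanted bits are
 * set.
 */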

/* Enables or disables the USB device pullup via the ISP1301 transceiver */
static void isp1301_pullup_set(struct lpc32xx_udc *udc)
{
	if (udc->pullup)
		/* Enable pullup for bus signalling */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1, OTG1_DP_PULLUP);
	else
		/* Disable pullup for bus signalling */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
			OTG1_DP_PULLUP);
}

static void pullup_work(struct work_struct *work)
{
	struct lpc32xx_udc *udc =
		container_of(work, struct lpc32xx_udc, pullup_job);

	isp1301_pullup_set(udc);
}

static void isp1301_pullup_enable(struct lpc32xx_udc *udc, int en_pullup,
				  int block)
{
	if (en_pullup == udc->pullup)
		return;

	udc->pullup = en_pullup;
	if (block)
		isp1301_pullup_set(udc);
	else
		/* defer slow i2c pull up setting */
		schedule_work(&udc->pullup_job);
}

#ifdef CONFIG_PM
/* Powers up or down the ISP1301 transceiver */
static void isp1301_set_powerstate(struct lpc32xx_udc *udc, int enable)
{
	if (enable != 0)
		/* Power up ISP1301 - this ISP1301 will automatically wakeup
		   when VBUS is detected */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR,
			MC2_GLOBAL_PWR_DN);
	else
		/* Power down ISP1301 */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
}

static void power_work(struct work_struct *work)
{
	struct lpc32xx_udc *udc =
		container_of(work, struct lpc32xx_udc, power_job);

	isp1301_set_powerstate(udc, udc->poweron);
}
#endif

/*
 *
 * USB protocol engine command/data read/write helper functions
 *
 */

/* Issues a single command to the USB device state machine */
static void udc_protocol_cmd_w(struct lpc32xx_udc *udc, u32 cmd)
{
	u32 pass = 0;
	int to;

	/* EP may lock on CLRI if this read isn't done */
	u32 tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
	(void) tmp;

	while (pass == 0) {
		writel(USBD_CCEMPTY, USBD_DEVINTCLR(udc->udp_baseaddr));

		/* Write command code */
		writel(cmd, USBD_CMDCODE(udc->udp_baseaddr));
		to = 10000;
		while (((readl(USBD_DEVINTST(udc->udp_baseaddr)) &
			 USBD_CCEMPTY) == 0) && (to > 0)) {
			to--;
		}

		if (to > 0)
			pass = 1;

		cpu_relax();
	}
}

/* Issues 2 commands (or command and data) to the USB device state machine */
static inline void udc_protocol_cmd_data_w(struct lpc32xx_udc *udc, u32 cmd,
					   u32 data)
{
	udc_protocol_cmd_w(udc, cmd);
	udc_protocol_cmd_w(udc, data);
}

/* Issues a single command to the USB device state machine and reads
 * response data */
static u32 udc_protocol_cmd_r(struct lpc32xx_udc *udc, u32 cmd)
{
	u32 tmp;
	int to = 1000;

	/* Write a command and read data from the protocol engine */
	writel((USBD_CDFULL | USBD_CCEMPTY),
	       USBD_DEVINTCLR(udc->udp_baseaddr));

	/* Write command code */
	udc_protocol_cmd_w(udc, cmd);

	tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
	while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) & USBD_CDFULL))
	       && (to > 0))
		to--;
	if (!to)
		dev_dbg(udc->dev,
			"Protocol engine didn't receive response (CDFULL)\n");

	return readl(USBD_CMDDATA(udc->udp_baseaddr));
}
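
/*
 * Typical usage of the helpers above, as seen later in this file: a command
 * with a data-write phase is issued with udc_protocol_cmd_data_w(), e.g.
 *
 *	udc_protocol_cmd_data_w(udc, CMD_SET_ADDR, DAT_WR_BYTE(DEV_EN | addr));
 *
 * while a command with a read-back uses udc_protocol_cmd_w() followed by
 * udc_protocol_cmd_r(), e.g. CMD_RD_FRAME / DAT_RD_FRAME in
 * udc_get_current_frame().
 */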

/*
 *
 * USB device interrupt mask support functions
 *
 */

/* Enable one or more USB device interrupts */
static inline void uda_enable_devint(struct lpc32xx_udc *udc, u32 devmask)
{
	udc->enabled_devints |= devmask;
	writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
}

/* Disable one or more USB device interrupts */
static inline void uda_disable_devint(struct lpc32xx_udc *udc, u32 mask)
{
	udc->enabled_devints &= ~mask;
	writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
}

/* Clear one or more USB device interrupts */
static inline void uda_clear_devint(struct lpc32xx_udc *udc, u32 mask)
{
	writel(mask, USBD_DEVINTCLR(udc->udp_baseaddr));
}

/*
 *
 * Endpoint interrupt disable/enable functions
 *
 */

/* Enable one or more USB endpoint interrupts */
static void uda_enable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
{
	udc->enabled_hwepints |= (1 << hwep);
	writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
}

/* Disable one or more USB endpoint interrupts */
static void uda_disable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
{
	udc->enabled_hwepints &= ~(1 << hwep);
	writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
}

/* Clear one or more USB endpoint interrupts */
static inline void uda_clear_hwepint(struct lpc32xx_udc *udc, u32 hwep)
{
	writel((1 << hwep), USBD_EPINTCLR(udc->udp_baseaddr));
}

/* Enable DMA for the HW channel */
static inline void udc_ep_dma_enable(struct lpc32xx_udc *udc, u32 hwep)
{
	writel((1 << hwep), USBD_EPDMAEN(udc->udp_baseaddr));
}

/* Disable DMA for the HW channel */
static inline void udc_ep_dma_disable(struct lpc32xx_udc *udc, u32 hwep)
{
	writel((1 << hwep), USBD_EPDMADIS(udc->udp_baseaddr));
}

/*
 *
 * Endpoint realize/unrealize functions
 *
 */

/* Before an endpoint can be used, it needs to be realized
 * in the USB protocol engine - this realizes the endpoint.
 * The interrupt (FIFO or DMA) is not enabled with this function */
static void udc_realize_hwep(struct lpc32xx_udc *udc, u32 hwep,
			     u32 maxpacket)
{
	int to = 1000;

	writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
	writel(hwep, USBD_EPIND(udc->udp_baseaddr));
	udc->realized_eps |= (1 << hwep);
	writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
	writel(maxpacket, USBD_EPMAXPSIZE(udc->udp_baseaddr));

	/* Wait until endpoint is realized in hardware */
	while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) &
		  USBD_EP_RLZED)) && (to > 0))
		to--;
	if (!to)
		dev_dbg(udc->dev, "EP not correctly realized in hardware\n");

	writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
}

/* Unrealize an EP */
static void udc_unrealize_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc->realized_eps &= ~(1 << hwep);
	writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
}

/*
 *
 * Endpoint support functions
 *
 */

/* Select and clear endpoint interrupt */
static u32 udc_selep_clrint(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_w(udc, CMD_SEL_EP_CLRI(hwep));
	return udc_protocol_cmd_r(udc, DAT_SEL_EP_CLRI(hwep));
}

/* Disables the endpoint in the USB protocol engine */
static void udc_disable_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
				DAT_WR_BYTE(EP_STAT_DA));
}

/* Stalls the endpoint - endpoint will return STALL */
static void udc_stall_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
				DAT_WR_BYTE(EP_STAT_ST));
}

/* Clear stall or reset endpoint */
static void udc_clrstall_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
				DAT_WR_BYTE(0));
}

/* Select an endpoint for endpoint status, clear, validate */
static void udc_select_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_w(udc, CMD_SEL_EP(hwep));
}

/*
 *
 * Endpoint buffer management functions
 *
 */

/* Clear the current endpoint's buffer */
static void udc_clr_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_select_hwep(udc, hwep);
	udc_protocol_cmd_w(udc, CMD_CLR_BUF);
}

/* Validate the current endpoint's buffer */
static void udc_val_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_select_hwep(udc, hwep);
	udc_protocol_cmd_w(udc, CMD_VALID_BUF);
}

static inline u32 udc_clearep_getsts(struct lpc32xx_udc *udc, u32 hwep)
{
	/* Clear EP interrupt */
	uda_clear_hwepint(udc, hwep);
	return udc_selep_clrint(udc, hwep);
}

/*
 *
 * USB EP DMA support
 *
 */

/* Allocate a DMA Descriptor */
static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc)
{
	dma_addr_t dma;
	struct lpc32xx_usbd_dd_gad *dd;

	dd = (struct lpc32xx_usbd_dd_gad *) dma_pool_alloc(
			udc->dd_cache, (GFP_KERNEL | GFP_DMA), &dma);
	if (dd)
		dd->this_dma = dma;

	return dd;
}

/* Free a DMA Descriptor */
static void udc_dd_free(struct lpc32xx_udc *udc, struct lpc32xx_usbd_dd_gad *dd)
{
	dma_pool_free(udc->dd_cache, dd, dd->this_dma);
}

/*
 *
 * USB setup and shutdown functions
 *
 */

/* Enables or disables most of the USB system clocks when low power mode is
 * needed. Clocks are typically started on a connection event, and disabled
 * when a cable is disconnected */
static void udc_clk_set(struct lpc32xx_udc *udc, int enable)
{
	if (enable != 0) {
		if (udc->clocked)
			return;

		udc->clocked = 1;

		/* 48MHz PLL up */
		clk_enable(udc->usb_pll_clk);

		/* Enable the USB device clock */
		writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN,
		       USB_CTRL);

		clk_enable(udc->usb_otg_clk);
	} else {
		if (!udc->clocked)
			return;

		udc->clocked = 0;

		/* Never disable the USB_HCLK during normal operation */

		/* 48MHz PLL down */
		clk_disable(udc->usb_pll_clk);

		/* Disable the USB device clock */
		writel(readl(USB_CTRL) & ~USB_DEV_NEED_CLK_EN,
		       USB_CTRL);

		clk_disable(udc->usb_otg_clk);
	}
}

/* Set/reset USB device address */
static void udc_set_address(struct lpc32xx_udc *udc, u32 addr)
{
	/* Address will be latched at the end of the status phase, or
	   latched immediately if function is called twice */
	udc_protocol_cmd_data_w(udc, CMD_SET_ADDR,
				DAT_WR_BYTE(DEV_EN | addr));
}

/* Set up an IN request for DMA transfer - this consists of determining the
 * list of DMA addresses for the transfer, allocating DMA Descriptors,
 * installing the DD into the UDCA, and then enabling the DMA for that EP */
static int udc_ep_in_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;
	u32 hwep = ep->hwep_num;

	ep->req_pending = 1;

	/* There will always be a request waiting here */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);

	/* Place the DD Descriptor into the UDCA */
	udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;

	/* Enable DMA and interrupt for the HW EP */
	udc_ep_dma_enable(udc, hwep);

	/* Clear ZLP if last packet is not of MAXP size */
	if (req->req.length % ep->ep.maxpacket)
		req->send_zlp = 0;

	return 0;
}

/* Set up an OUT request for DMA transfer - this consists of determining the
 * list of DMA addresses for the transfer, allocating DMA Descriptors,
 * installing the DD into the UDCA, and then enabling the DMA for that EP */
static int udc_ep_out_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;
	u32 hwep = ep->hwep_num;

	ep->req_pending = 1;

	/* There will always be a request waiting here */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);

	/* Place the DD Descriptor into the UDCA */
	udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;

	/* Enable DMA and interrupt for the HW EP */
	udc_ep_dma_enable(udc, hwep);
	return 0;
}

static void udc_disable(struct lpc32xx_udc *udc)
{
	u32 i;

	/* Disable device */
	udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
	udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(0));

	/* Disable all device interrupts (including EP0) */
	uda_disable_devint(udc, 0x3FF);

	/* Disable and reset all endpoint interrupts */
	for (i = 0; i < 32; i++) {
		uda_disable_hwepint(udc, i);
		uda_clear_hwepint(udc, i);
		udc_disable_hwep(udc, i);
		udc_unrealize_hwep(udc, i);
		udc->udca_v_base[i] = 0;

		/* Disable and clear all interrupts and DMA */
		udc_ep_dma_disable(udc, i);
		writel((1 << i), USBD_EOTINTCLR(udc->udp_baseaddr));
		writel((1 << i), USBD_NDDRTINTCLR(udc->udp_baseaddr));
		writel((1 << i), USBD_SYSERRTINTCLR(udc->udp_baseaddr));
		writel((1 << i), USBD_DMARCLR(udc->udp_baseaddr));
	}

	/* Disable DMA interrupts */
	writel(0, USBD_DMAINTEN(udc->udp_baseaddr));

	writel(0, USBD_UDCAH(udc->udp_baseaddr));
}

static void udc_enable(struct lpc32xx_udc *udc)
{
	u32 i;
	struct lpc32xx_ep *ep = &udc->ep[0];

	/* Start with known state */
	udc_disable(udc);

	/* Enable device */
	udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(DEV_CON));

	/* EP interrupts on high priority, FRAME interrupt on low priority */
	writel(USBD_EP_FAST, USBD_DEVINTPRI(udc->udp_baseaddr));
	writel(0xFFFF, USBD_EPINTPRI(udc->udp_baseaddr));

	/* Clear any pending device interrupts */
	writel(0x3FF, USBD_DEVINTCLR(udc->udp_baseaddr));

	/* Setup UDCA - not yet used (DMA) */
	writel(udc->udca_p_base, USBD_UDCAH(udc->udp_baseaddr));

	/* Only enable EP0 in and out for now, EP0 only works in FIFO mode */
	for (i = 0; i <= 1; i++) {
		udc_realize_hwep(udc, i, ep->ep.maxpacket);
		uda_enable_hwepint(udc, i);
		udc_select_hwep(udc, i);
		udc_clrstall_hwep(udc, i);
		udc_clr_buffer_hwep(udc, i);
	}

	/* Device interrupt setup */
	uda_clear_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
			       USBD_EP_FAST));
	uda_enable_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
				USBD_EP_FAST));

	/* Set device address to 0 - called twice to force a latch in the USB
	   engine without the need of a setup packet status closure */
	udc_set_address(udc, 0);
	udc_set_address(udc, 0);

	/* Enable master DMA interrupts */
	writel((USBD_SYS_ERR_INT | USBD_EOT_INT),
	       USBD_DMAINTEN(udc->udp_baseaddr));

	udc->dev_status = 0;
}

/*
 *
 * USB device board specific events handled via callbacks
 *
 */

/* Connection change event - notify board function of change */
static void uda_power_event(struct lpc32xx_udc *udc, u32 conn)
{
	/* Just notify of a connection change event (optional) */
	if (udc->board->conn_chgb != NULL)
		udc->board->conn_chgb(conn);
}

/* Suspend/resume event - notify board function of change */
static void uda_resm_susp_event(struct lpc32xx_udc *udc, u32 conn)
{
	/* Just notify of a Suspend/resume change event (optional) */
	if (udc->board->susp_chgb != NULL)
		udc->board->susp_chgb(conn);

	if (conn)
		udc->suspended = 0;
	else
		udc->suspended = 1;
}

/* Remote wakeup enable/disable - notify board function of change */
static void uda_remwkp_cgh(struct lpc32xx_udc *udc)
{
	if (udc->board->rmwk_chgb != NULL)
		udc->board->rmwk_chgb(udc->dev_status &
				      (1 << USB_DEVICE_REMOTE_WAKEUP));
}

/* Reads data from FIFO, adjusts for alignment and data size */
static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
{
	int n, i, bl;
	u16 *p16;
	u32 *p32, tmp, cbytes;

	/* Use optimal data transfer method based on source address and size */
	switch (((u32) data) & 0x3) {
	case 0: /* 32-bit aligned */
		p32 = (u32 *) data;
		cbytes = (bytes & ~0x3);

		/* Copy 32-bit aligned data first */
		for (n = 0; n < cbytes; n += 4)
			*p32++ = readl(USBD_RXDATA(udc->udp_baseaddr));

		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			for (n = 0; n < bl; n++)
				data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
		}
		break;

	case 1: /* 8-bit aligned */
	case 3:
		/* Each byte has to be handled independently */
		for (n = 0; n < bytes; n += 4) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));

			bl = bytes - n;
			if (bl > 3)
				bl = 3;

			for (i = 0; i < bl; i++)
				data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF);
		}
		break;

	case 2: /* 16-bit aligned */
		p16 = (u16 *) data;
		cbytes = (bytes & ~0x3);

		/* Copy 32-bit sized objects first with 16-bit alignment */
		for (n = 0; n < cbytes; n += 4) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			*p16++ = (u16)(tmp & 0xFFFF);
			*p16++ = (u16)((tmp >> 16) & 0xFFFF);
		}

		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			for (n = 0; n < bl; n++)
				data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
		}
		break;
	}
}
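
/*
 * Example: popping 7 bytes into a word-aligned buffer takes two reads of
 * USBD_RXDATA - one full 32-bit copy for the first 4 bytes, then one more
 * read from which the remaining 3 bytes are extracted a byte at a time.
 */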

/* Read data from the FIFO for an endpoint. This function is for endpoints (such
 * as EP0) that don't use DMA. This function should only be called if a packet
 * is known to be ready to read for the endpoint. Note that the endpoint must
 * be selected in the protocol engine prior to this call. */
static u32 udc_read_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
			 u32 bytes)
{
	u32 tmpv;
	int to = 1000;
	u32 tmp, hwrep = ((hwep & 0x1E) << 1) | CTRL_RD_EN;

	/* Setup read of endpoint */
	writel(hwrep, USBD_CTRL(udc->udp_baseaddr));

	/* Wait until packet is ready */
	while ((((tmpv = readl(USBD_RXPLEN(udc->udp_baseaddr))) &
		 PKT_RDY) == 0) && (to > 0))
		to--;
	if (!to)
		dev_dbg(udc->dev, "No packet ready on FIFO EP read\n");

	/* Mask out count */
	tmp = tmpv & PKT_LNGTH_MASK;
	if (bytes < tmp)
		tmp = bytes;

	if ((tmp > 0) && (data != NULL))
		udc_pop_fifo(udc, (u8 *) data, tmp);

	writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));

	/* Clear the buffer */
	udc_clr_buffer_hwep(udc, hwep);

	return tmp;
}

/* Stuffs data into the FIFO, adjusts for alignment and data size */
static void udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
{
	int n, i, bl;
	u16 *p16;
	u32 *p32, tmp, cbytes;

	/* Use optimal data transfer method based on source address and size */
	switch (((u32) data) & 0x3) {
	case 0: /* 32-bit aligned */
		p32 = (u32 *) data;
		cbytes = (bytes & ~0x3);

		/* Copy 32-bit aligned data first */
		for (n = 0; n < cbytes; n += 4)
			writel(*p32++, USBD_TXDATA(udc->udp_baseaddr));

		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = 0;
			for (n = 0; n < bl; n++)
				tmp |= data[cbytes + n] << (n * 8);

			writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
		}
		break;

	case 1: /* 8-bit aligned */
	case 3:
		/* Each byte has to be handled independently */
		for (n = 0; n < bytes; n += 4) {
			bl = bytes - n;
			if (bl > 4)
				bl = 4;

			tmp = 0;
			for (i = 0; i < bl; i++)
				tmp |= data[n + i] << (i * 8);

			writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
		}
		break;

	case 2: /* 16-bit aligned */
		p16 = (u16 *) data;
		cbytes = (bytes & ~0x3);

		/* Copy 32-bit aligned data first */
		for (n = 0; n < cbytes; n += 4) {
			tmp = *p16++ & 0xFFFF;
			tmp |= (*p16++ & 0xFFFF) << 16;
			writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
		}

		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = 0;
			for (n = 0; n < bl; n++)
				tmp |= data[cbytes + n] << (n * 8);

			writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
		}
		break;
	}
}
/* Write data to the FIFO for an endpoint. This function is for endpoints (such
 * as EP0) that don't use DMA. Note that the endpoint must be selected in the
 * protocol engine prior to this call. */
static void udc_write_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
			   u32 bytes)
{
	u32 hwwep = ((hwep & 0x1E) << 1) | CTRL_WR_EN;

	if ((bytes > 0) && (data == NULL))
		return;

	/* Setup write of endpoint */
	writel(hwwep, USBD_CTRL(udc->udp_baseaddr));
	writel(bytes, USBD_TXPLEN(udc->udp_baseaddr));

	/* Need at least 1 byte to trigger TX */
	if (bytes == 0)
		writel(0, USBD_TXDATA(udc->udp_baseaddr));
	else
		udc_stuff_fifo(udc, (u8 *) data, bytes);

	writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));

	udc_val_buffer_hwep(udc, hwep);
}

/* USB device reset - resets USB to a default state with just EP0
   enabled */
static void uda_usb_reset(struct lpc32xx_udc *udc)
{
	u32 i = 0;

	/* Re-init device controller and EP0 */
	udc_enable(udc);
	udc->gadget.speed = USB_SPEED_FULL;

	for (i = 1; i < NUM_ENDPOINTS; i++) {
		struct lpc32xx_ep *ep = &udc->ep[i];

		ep->req_pending = 0;
	}
}

/* Send a ZLP on EP0 */
static void udc_ep0_send_zlp(struct lpc32xx_udc *udc)
{
	udc_write_hwep(udc, EP_IN, NULL, 0);
}

/* Get current frame number */
static u16 udc_get_current_frame(struct lpc32xx_udc *udc)
{
	u16 flo, fhi;

	udc_protocol_cmd_w(udc, CMD_RD_FRAME);
	flo = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
	fhi = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);

	return (fhi << 8) | flo;
}

/* Set the device as configured - enables all endpoints */
static inline void udc_set_device_configured(struct lpc32xx_udc *udc)
{
	udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(CONF_DVICE));
}

/* Set the device as unconfigured - disables all endpoints */
static inline void udc_set_device_unconfigured(struct lpc32xx_udc *udc)
{
	udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
}

/* reinit == restore initial software state */
static void udc_reinit(struct lpc32xx_udc *udc)
{
	u32 i;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);

	for (i = 0; i < NUM_ENDPOINTS; i++) {
		struct lpc32xx_ep *ep = &udc->ep[i];

		if (i != 0)
			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
		usb_ep_set_maxpacket_limit(&ep->ep, ep->maxpacket);
		INIT_LIST_HEAD(&ep->queue);
		ep->req_pending = 0;
	}

	udc->ep0state = WAIT_FOR_SETUP;
}

/* Must be called with lock */
static void done(struct lpc32xx_ep *ep, struct lpc32xx_request *req, int status)
{
	struct lpc32xx_udc *udc = ep->udc;

	list_del_init(&req->queue);
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	if (ep->lep) {
		usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in);

		/* Free DDs */
		udc_dd_free(udc, req->dd_desc_ptr);
	}

	if (status && status != -ESHUTDOWN)
		ep_dbg(ep, "%s done %p, status %d\n", ep->ep.name, req, status);

	ep->req_pending = 0;
	spin_unlock(&udc->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&udc->lock);
}

/* Must be called with lock */
static void nuke(struct lpc32xx_ep *ep, int status)
{
	struct lpc32xx_request *req;

	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
		done(ep, req, status);
	}

	if (status == -ESHUTDOWN) {
		uda_disable_hwepint(ep->udc, ep->hwep_num);
		udc_disable_hwep(ep->udc, ep->hwep_num);
	}
}

/* IN endpoint 0 transfer */
static int udc_ep0_in_req(struct lpc32xx_udc *udc)
{
	struct lpc32xx_request *req;
	struct lpc32xx_ep *ep0 = &udc->ep[0];
	u32 tsend, ts = 0;

	if (list_empty(&ep0->queue))
		/* Nothing to send */
		return 0;
	else
		req = list_entry(ep0->queue.next, struct lpc32xx_request,
				 queue);

	tsend = ts = req->req.length - req->req.actual;
	if (ts == 0) {
		/* Send a ZLP */
		udc_ep0_send_zlp(udc);
		done(ep0, req, 0);
		return 1;
	} else if (ts > ep0->ep.maxpacket)
		ts = ep0->ep.maxpacket; /* Just send what we can */

	/* Write data to the EP0 FIFO and start transfer */
	udc_write_hwep(udc, EP_IN, (req->req.buf + req->req.actual), ts);

	/* Increment data pointer */
	req->req.actual += ts;

	if (tsend >= ep0->ep.maxpacket)
		return 0; /* Stay in data transfer state */

	/* Transfer request is complete */
	udc->ep0state = WAIT_FOR_SETUP;
	done(ep0, req, 0);
	return 1;
}

/* OUT endpoint 0 transfer */
static int udc_ep0_out_req(struct lpc32xx_udc *udc)
{
	struct lpc32xx_request *req;
	struct lpc32xx_ep *ep0 = &udc->ep[0];
	u32 tr, bufferspace;

	if (list_empty(&ep0->queue))
		return 0;
	else
		req = list_entry(ep0->queue.next, struct lpc32xx_request,
				 queue);

	if (req) {
		if (req->req.length == 0) {
			/* Just dequeue request */
			done(ep0, req, 0);
			udc->ep0state = WAIT_FOR_SETUP;
			return 1;
		}

		/* Get data from FIFO */
		bufferspace = req->req.length - req->req.actual;
		if (bufferspace > ep0->ep.maxpacket)
			bufferspace = ep0->ep.maxpacket;

		/* Copy data to buffer */
		prefetchw(req->req.buf + req->req.actual);
		tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual,
				   bufferspace);
		req->req.actual += bufferspace;

		if (tr < ep0->ep.maxpacket) {
			/* This is the last packet */
			done(ep0, req, 0);
			udc->ep0state = WAIT_FOR_SETUP;
			return 1;
		}
	}

	return 0;
}

/* Must be called with lock */
static void stop_activity(struct lpc32xx_udc *udc)
{
	struct usb_gadget_driver *driver = udc->driver;
	int i;

	if (udc->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->suspended = 0;

	for (i = 0; i < NUM_ENDPOINTS; i++) {
		struct lpc32xx_ep *ep = &udc->ep[i];

		nuke(ep, -ESHUTDOWN);
	}
	if (driver) {
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
	}

	isp1301_pullup_enable(udc, 0, 0);
	udc_disable(udc);
	udc_reinit(udc);
}

/*
 * Activate or kill host pullup
 * Can be called with or without lock
 */
static void pullup(struct lpc32xx_udc *udc, int is_on)
{
	if (!udc->clocked)
		return;

	if (!udc->enabled || !udc->vbus)
		is_on = 0;

	if (is_on != udc->pullup)
		isp1301_pullup_enable(udc, is_on, 0);
}

/* Must be called without lock */
static int lpc32xx_ep_disable(struct usb_ep *_ep)
{
	struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
	struct lpc32xx_udc *udc = ep->udc;
	unsigned long flags;

	if ((ep->hwep_num_base == 0) || (ep->hwep_num == 0))
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);

	nuke(ep, -ESHUTDOWN);

	/* Clear all DMA statuses for this EP */
	udc_ep_dma_disable(udc, ep->hwep_num);
	writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));

	/* Remove the DD pointer in the UDCA */
	udc->udca_v_base[ep->hwep_num] = 0;

	/* Disable and reset endpoint and interrupt */
	uda_clear_hwepint(udc, ep->hwep_num);
	udc_unrealize_hwep(udc, ep->hwep_num);

	ep->hwep_num = 0;

	spin_unlock_irqrestore(&udc->lock, flags);

	atomic_dec(&udc->enabled_ep_cnt);
	wake_up(&udc->ep_disable_wait_queue);

	return 0;
}

/* Must be called without lock */
static int lpc32xx_ep_enable(struct usb_ep *_ep,
			     const struct usb_endpoint_descriptor *desc)
{
	struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
	struct lpc32xx_udc *udc = ep->udc;
	u16 maxpacket;
	u32 tmp;
	unsigned long flags;

	/* Verify EP data */
	if ((!_ep) || (!ep) || (!desc) ||
	    (desc->bDescriptorType != USB_DT_ENDPOINT)) {
		dev_dbg(udc->dev, "bad ep or descriptor\n");
		return -EINVAL;
	}

	maxpacket = usb_endpoint_maxp(desc);
	if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) {
		dev_dbg(udc->dev, "bad ep descriptor's packet size\n");
		return -EINVAL;
	}

	/* Don't touch EP0 */
	if (ep->hwep_num_base == 0) {
		dev_dbg(udc->dev, "Can't re-enable EP0!!!\n");
		return -EINVAL;
	}

	/* Is driver ready? */
	if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
		dev_dbg(udc->dev, "bogus device state\n");
		return -ESHUTDOWN;
	}

	tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
	switch (tmp) {
	case USB_ENDPOINT_XFER_CONTROL:
		return -EINVAL;

	case USB_ENDPOINT_XFER_INT:
		if (maxpacket > ep->maxpacket) {
			dev_dbg(udc->dev,
				"Bad INT endpoint maxpacket %d\n", maxpacket);
			return -EINVAL;
		}
		break;

	case USB_ENDPOINT_XFER_BULK:
		switch (maxpacket) {
		case 8:
		case 16:
		case 32:
		case 64:
			break;

		default:
			dev_dbg(udc->dev,
				"Bad BULK endpoint maxpacket %d\n", maxpacket);
			return -EINVAL;
		}
		break;

	case USB_ENDPOINT_XFER_ISOC:
		break;
	}

	spin_lock_irqsave(&udc->lock, flags);

	/* Initialize endpoint to match the selected descriptor */
	ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
	ep->ep.maxpacket = maxpacket;

	/* Map hardware endpoint from base and direction */
	if (ep->is_in)
		/* IN endpoints are offset 1 from the OUT endpoint */
		ep->hwep_num = ep->hwep_num_base + EP_IN;
	else
		ep->hwep_num = ep->hwep_num_base;

	ep_dbg(ep, "EP enabled: %s, HW:%d, MP:%d IN:%d\n", ep->ep.name,
	       ep->hwep_num, maxpacket, (ep->is_in == 1));

	/* Realize the endpoint, interrupt is enabled later when
	 * buffers are queued, IN EPs will NAK until buffers are ready */
	udc_realize_hwep(udc, ep->hwep_num, ep->ep.maxpacket);
	udc_clr_buffer_hwep(udc, ep->hwep_num);
	uda_disable_hwepint(udc, ep->hwep_num);
	udc_clrstall_hwep(udc, ep->hwep_num);

	/* Clear all DMA statuses for this EP */
	udc_ep_dma_disable(udc, ep->hwep_num);
	writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
	writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));

	spin_unlock_irqrestore(&udc->lock, flags);

	atomic_inc(&udc->enabled_ep_cnt);
	return 0;
}

/*
 * Allocate a USB request
 * Can be called with or without lock
 */
static struct usb_request *lpc32xx_ep_alloc_request(struct usb_ep *_ep,
						    gfp_t gfp_flags)
{
	struct lpc32xx_request *req;

	req = kzalloc(sizeof(struct lpc32xx_request), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);
	return &req->req;
}

/*
 * De-allocate a USB request
 * Can be called with or without lock
 */
static void lpc32xx_ep_free_request(struct usb_ep *_ep,
				    struct usb_request *_req)
{
	struct lpc32xx_request *req;

	req = container_of(_req, struct lpc32xx_request, req);
	BUG_ON(!list_empty(&req->queue));
	kfree(req);
}

/* Must be called without lock */
static int lpc32xx_ep_queue(struct usb_ep *_ep,
			    struct usb_request *_req, gfp_t gfp_flags)
{
	struct lpc32xx_request *req;
	struct lpc32xx_ep *ep;
	struct lpc32xx_udc *udc;
	unsigned long flags;
	int status = 0;

	req = container_of(_req, struct lpc32xx_request, req);
	ep = container_of(_ep, struct lpc32xx_ep, ep);

	if (!_ep || !_req || !_req->complete || !_req->buf ||
	    !list_empty(&req->queue))
		return -EINVAL;

	udc = ep->udc;

	if (udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -EPIPE;

	if (ep->lep) {
		struct lpc32xx_usbd_dd_gad *dd;

		status = usb_gadget_map_request(&udc->gadget, _req, ep->is_in);
		if (status)
			return status;

		/* For the request, build a list of DDs */
		dd = udc_dd_alloc(udc);
		if (!dd) {
			/* Error allocating DD */
			return -ENOMEM;
		}
		req->dd_desc_ptr = dd;

		/* Setup the DMA descriptor */
		dd->dd_next_phy = dd->dd_next_v = 0;
		dd->dd_buffer_addr = req->req.dma;
		dd->dd_status = 0;

		/* Special handling for ISO EPs */
		if (ep->eptype == EP_ISO_TYPE) {
			dd->dd_setup = DD_SETUP_ISO_EP |
				DD_SETUP_PACKETLEN(0) |
				DD_SETUP_DMALENBYTES(1);
			dd->dd_iso_ps_mem_addr = dd->this_dma + 24;
			if (ep->is_in)
				dd->iso_status[0] = req->req.length;
			else
				dd->iso_status[0] = 0;
		} else
			dd->dd_setup = DD_SETUP_PACKETLEN(ep->ep.maxpacket) |
				DD_SETUP_DMALENBYTES(req->req.length);
	}

	ep_dbg(ep, "%s queue req %p len %d buf %p (in=%d) z=%d\n", _ep->name,
	       _req, _req->length, _req->buf, ep->is_in, _req->zero);

	spin_lock_irqsave(&udc->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;
	req->send_zlp = _req->zero;

	/* Kickstart empty queues */
	if (list_empty(&ep->queue)) {
		list_add_tail(&req->queue, &ep->queue);

		if (ep->hwep_num_base == 0) {
			/* Handle expected data direction */
			if (ep->is_in) {
				/* IN packet to host */
				udc->ep0state = DATA_IN;
				status = udc_ep0_in_req(udc);
			} else {
				/* OUT packet from host */
				udc->ep0state = DATA_OUT;
				status = udc_ep0_out_req(udc);
			}
		} else if (ep->is_in) {
			/* IN packet to host and kick off transfer */
			if (!ep->req_pending)
				udc_ep_in_req_dma(udc, ep);
		} else
			/* OUT packet from host and kick off list */
			if (!ep->req_pending)
				udc_ep_out_req_dma(udc, ep);
	} else
		list_add_tail(&req->queue, &ep->queue);

	spin_unlock_irqrestore(&udc->lock, flags);

	return (status < 0) ? status : 0;
}

/* Must be called without lock */
static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct lpc32xx_ep *ep;
	struct lpc32xx_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct lpc32xx_ep, ep);
	if (!_ep || ep->hwep_num_base == 0)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore(&ep->udc->lock, flags);
		return -EINVAL;
	}

	done(ep, req, -ECONNRESET);

	spin_unlock_irqrestore(&ep->udc->lock, flags);

	return 0;
}

/* Must be called without lock */
static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
	struct lpc32xx_udc *udc = ep->udc;
	unsigned long flags;

	if ((!ep) || (ep->hwep_num <= 1))
		return -EINVAL;

	/* Don't halt an IN EP */
	if (ep->is_in)
		return -EAGAIN;

	spin_lock_irqsave(&udc->lock, flags);

	if (value == 1) {
		/* Stall */
		udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
					DAT_WR_BYTE(EP_STAT_ST));
	} else {
		/* End stall */
		ep->wedge = 0;
		udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
					DAT_WR_BYTE(0));
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/* Set the halt feature; clear requests are ignored */
static int lpc32xx_ep_set_wedge(struct usb_ep *_ep)
{
	struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);

	if (!_ep || !ep->udc)
		return -EINVAL;

	ep->wedge = 1;

	return usb_ep_set_halt(_ep);
}

static const struct usb_ep_ops lpc32xx_ep_ops = {
	.enable		= lpc32xx_ep_enable,
	.disable	= lpc32xx_ep_disable,
	.alloc_request	= lpc32xx_ep_alloc_request,
	.free_request	= lpc32xx_ep_free_request,
	.queue		= lpc32xx_ep_queue,
	.dequeue	= lpc32xx_ep_dequeue,
	.set_halt	= lpc32xx_ep_set_halt,
	.set_wedge	= lpc32xx_ep_set_wedge,
};

/* Send a ZLP on a non-0 IN EP */
void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	/* Clear EP status */
	udc_clearep_getsts(udc, ep->hwep_num);

	/* Send ZLP via FIFO mechanism */
	udc_write_hwep(udc, ep->hwep_num, NULL, 0);
}

/*
 * Handle EP completion for ZLP
 * This function will only be called when a delayed ZLP needs to be sent out
 * after a DMA transfer has filled both buffers.
 */
void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	u32 epstatus;
	struct lpc32xx_request *req;

	if (ep->hwep_num <= 0)
		return;

	uda_clear_hwepint(udc, ep->hwep_num);

	/* If this interrupt isn't enabled, return now */
	if (!(udc->enabled_hwepints & (1 << ep->hwep_num)))
		return;

	/* Get endpoint status */
	epstatus = udc_clearep_getsts(udc, ep->hwep_num);

	/*
	 * This should never happen, but protect against writing to the
	 * buffer when full.
	 */
	if (epstatus & EP_SEL_F)
		return;

	if (ep->is_in) {
		udc_send_in_zlp(udc, ep);
		uda_disable_hwepint(udc, ep->hwep_num);
	} else
		return;

	/* If there isn't a request waiting, something went wrong */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
	if (req) {
		done(ep, req, 0);

		/* Start another request if ready */
		if (!list_empty(&ep->queue)) {
			if (ep->is_in)
				udc_ep_in_req_dma(udc, ep);
			else
				udc_ep_out_req_dma(udc, ep);
		} else
			ep->req_pending = 0;
	}
}

/* DMA end of transfer completion */
static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	u32 status, epstatus;
	struct lpc32xx_request *req;
	struct lpc32xx_usbd_dd_gad *dd;

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	ep->totalints++;
#endif

	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
	if (!req) {
		ep_err(ep, "DMA interrupt on no req!\n");
		return;
	}
	dd = req->dd_desc_ptr;

	/* DMA descriptor should always be retired for this call */
	if (!(dd->dd_status & DD_STATUS_DD_RETIRED))
		ep_warn(ep, "DMA descriptor did not retire\n");

	/* Disable DMA */
	udc_ep_dma_disable(udc, ep->hwep_num);
	writel((1 << ep->hwep_num), USBD_EOTINTCLR(udc->udp_baseaddr));
	writel((1 << ep->hwep_num), USBD_NDDRTINTCLR(udc->udp_baseaddr));

	/* System error? */
	if (readl(USBD_SYSERRTINTST(udc->udp_baseaddr)) &
	    (1 << ep->hwep_num)) {
		writel((1 << ep->hwep_num),
		       USBD_SYSERRTINTCLR(udc->udp_baseaddr));
		ep_err(ep, "AHB critical error!\n");
		ep->req_pending = 0;

		/* The error could have occurred on a packet of a multipacket
		 * transfer, so recovering the transfer is not possible. Close
		 * the request with an error */
		done(ep, req, -ECONNABORTED);
		return;
	}

	/* Handle the current DD's status */
	status = dd->dd_status;
	switch (status & DD_STATUS_STS_MASK) {
	case DD_STATUS_STS_NS:
		/* DD not serviced? This shouldn't happen! */
		ep->req_pending = 0;
		ep_err(ep, "DMA critical EP error: DD not serviced (0x%x)!\n",
		       status);

		done(ep, req, -ECONNABORTED);
		return;

	case DD_STATUS_STS_BS:
		/* Interrupt only fires on EOT - This shouldn't happen! */
		ep->req_pending = 0;
		ep_err(ep, "DMA critical EP error: EOT prior to service completion (0x%x)!\n",
		       status);
		done(ep, req, -ECONNABORTED);
		return;

	case DD_STATUS_STS_NC:
	case DD_STATUS_STS_DUR:
		/* Really just a short packet, not an underrun */
		/* This is a good status and what we expect */
		break;

	default:
		/* Data overrun, system error, or unknown */
		ep->req_pending = 0;
		ep_err(ep, "DMA critical EP error: System error (0x%x)!\n",
		       status);
		done(ep, req, -ECONNABORTED);
		return;
	}

	/* ISO endpoints are handled differently */
	if (ep->eptype == EP_ISO_TYPE) {
		if (ep->is_in)
			req->req.actual = req->req.length;
		else
			req->req.actual = dd->iso_status[0] & 0xFFFF;
	} else
		req->req.actual += DD_STATUS_CURDMACNT(status);

	/* Send a ZLP if necessary. This will be done for non-int
	 * packets which have a size that is a multiple of MAXP */
	if (req->send_zlp) {
		/*
		 * If at least 1 buffer is available, send the ZLP now.
		 * Otherwise, the ZLP send needs to be deferred until a
		 * buffer is available.
		 */
		if (udc_clearep_getsts(udc, ep->hwep_num) & EP_SEL_F) {
			udc_clearep_getsts(udc, ep->hwep_num);
			uda_enable_hwepint(udc, ep->hwep_num);
			epstatus = udc_clearep_getsts(udc, ep->hwep_num);

			/* Let the EP interrupt handle the ZLP */
			return;
		} else
			udc_send_in_zlp(udc, ep);
	}

	/* Transfer request is complete */
	done(ep, req, 0);

	/* Start another request if ready */
	udc_clearep_getsts(udc, ep->hwep_num);
	if (!list_empty((&ep->queue))) {
		if (ep->is_in)
			udc_ep_in_req_dma(udc, ep);
		else
			udc_ep_out_req_dma(udc, ep);
	} else
		ep->req_pending = 0;
}

/*
 *
 * Endpoint 0 functions
 *
 */
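
/*
 * Handle a device status change reported by the protocol engine: a bus
 * reset re-initializes the controller, a connect change is passed to the
 * power event handler, and suspend/resume changes power the ISP1301
 * transceiver down or up via the deferred pullup work item.
 */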
  1807. static void udc_handle_dev(struct lpc32xx_udc *udc)
  1808. {
  1809. u32 tmp;
  1810. udc_protocol_cmd_w(udc, CMD_GET_DEV_STAT);
  1811. tmp = udc_protocol_cmd_r(udc, DAT_GET_DEV_STAT);
  1812. if (tmp & DEV_RST)
  1813. uda_usb_reset(udc);
  1814. else if (tmp & DEV_CON_CH)
  1815. uda_power_event(udc, (tmp & DEV_CON));
  1816. else if (tmp & DEV_SUS_CH) {
  1817. if (tmp & DEV_SUS) {
  1818. if (udc->vbus == 0)
  1819. stop_activity(udc);
  1820. else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
  1821. udc->driver) {
  1822. /* Power down transceiver */
  1823. udc->poweron = 0;
  1824. schedule_work(&udc->pullup_job);
  1825. uda_resm_susp_event(udc, 1);
  1826. }
  1827. } else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
  1828. udc->driver && udc->vbus) {
  1829. uda_resm_susp_event(udc, 0);
  1830. /* Power up transceiver */
  1831. udc->poweron = 1;
  1832. schedule_work(&udc->pullup_job);
  1833. }
  1834. }
  1835. }
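
/*
 * Handle a GET_STATUS request on EP0: build the 2-byte status word for the
 * device, interface, or endpoint recipient and return it to the host
 * through the EP0 IN FIFO.
 */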
  1836. static int udc_get_status(struct lpc32xx_udc *udc, u16 reqtype, u16 wIndex)
  1837. {
  1838. struct lpc32xx_ep *ep;
  1839. u32 ep0buff = 0, tmp;
  1840. switch (reqtype & USB_RECIP_MASK) {
  1841. case USB_RECIP_INTERFACE:
  1842. break; /* Not supported */
  1843. case USB_RECIP_DEVICE:
  1844. ep0buff = udc->gadget.is_selfpowered;
  1845. if (udc->dev_status & (1 << USB_DEVICE_REMOTE_WAKEUP))
  1846. ep0buff |= (1 << USB_DEVICE_REMOTE_WAKEUP);
  1847. break;
  1848. case USB_RECIP_ENDPOINT:
  1849. tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
  1850. ep = &udc->ep[tmp];
  1851. if ((tmp == 0) || (tmp >= NUM_ENDPOINTS))
  1852. return -EOPNOTSUPP;
  1853. if (wIndex & USB_DIR_IN) {
  1854. if (!ep->is_in)
  1855. return -EOPNOTSUPP; /* Something's wrong */
  1856. } else if (ep->is_in)
  1857. return -EOPNOTSUPP; /* Not an IN endpoint */
  1858. /* Get status of the endpoint */
  1859. udc_protocol_cmd_w(udc, CMD_SEL_EP(ep->hwep_num));
  1860. tmp = udc_protocol_cmd_r(udc, DAT_SEL_EP(ep->hwep_num));
  1861. if (tmp & EP_SEL_ST)
  1862. ep0buff = (1 << USB_ENDPOINT_HALT);
  1863. else
  1864. ep0buff = 0;
  1865. break;
  1866. default:
  1867. break;
  1868. }
  1869. /* Return data */
  1870. udc_write_hwep(udc, EP_IN, &ep0buff, 2);
  1871. return 0;
  1872. }
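
/*
 * Process a SETUP packet on EP0. Feature, address, and status requests are
 * handled locally; everything else is forwarded to the gadget driver's
 * setup() callback, with a protocol stall generated on failure.
 */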
  1873. static void udc_handle_ep0_setup(struct lpc32xx_udc *udc)
  1874. {
  1875. struct lpc32xx_ep *ep, *ep0 = &udc->ep[0];
  1876. struct usb_ctrlrequest ctrlpkt;
  1877. int i, bytes;
  1878. u16 wIndex, wValue, wLength, reqtype, req, tmp;
  1879. /* Nuke previous transfers */
  1880. nuke(ep0, -EPROTO);
  1881. /* Get setup packet */
  1882. bytes = udc_read_hwep(udc, EP_OUT, (u32 *) &ctrlpkt, 8);
  1883. if (bytes != 8) {
  1884. ep_warn(ep0, "Incorrectly sized setup packet (s/b 8, is %d)!\n",
  1885. bytes);
  1886. return;
  1887. }
  1888. /* Native endianness */
  1889. wIndex = le16_to_cpu(ctrlpkt.wIndex);
  1890. wValue = le16_to_cpu(ctrlpkt.wValue);
  1891. wLength = le16_to_cpu(ctrlpkt.wLength);
  1892. reqtype = le16_to_cpu(ctrlpkt.bRequestType);
  1893. /* Set direction of EP0 */
  1894. if (likely(reqtype & USB_DIR_IN))
  1895. ep0->is_in = 1;
  1896. else
  1897. ep0->is_in = 0;
  1898. /* Handle SETUP packet */
  1899. req = le16_to_cpu(ctrlpkt.bRequest);
  1900. switch (req) {
  1901. case USB_REQ_CLEAR_FEATURE:
  1902. case USB_REQ_SET_FEATURE:
  1903. switch (reqtype) {
  1904. case (USB_TYPE_STANDARD | USB_RECIP_DEVICE):
  1905. if (wValue != USB_DEVICE_REMOTE_WAKEUP)
  1906. goto stall; /* Nothing else handled */
  1907. /* Tell board about event */
  1908. if (req == USB_REQ_CLEAR_FEATURE)
  1909. udc->dev_status &=
  1910. ~(1 << USB_DEVICE_REMOTE_WAKEUP);
  1911. else
  1912. udc->dev_status |=
  1913. (1 << USB_DEVICE_REMOTE_WAKEUP);
  1914. uda_remwkp_cgh(udc);
  1915. goto zlp_send;
  1916. case (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
  1917. tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
  1918. if ((wValue != USB_ENDPOINT_HALT) ||
  1919. (tmp >= NUM_ENDPOINTS))
  1920. break;
  1921. /* Find hardware endpoint from logical endpoint */
  1922. ep = &udc->ep[tmp];
  1923. tmp = ep->hwep_num;
  1924. if (tmp == 0)
  1925. break;
  1926. if (req == USB_REQ_SET_FEATURE)
  1927. udc_stall_hwep(udc, tmp);
  1928. else if (!ep->wedge)
  1929. udc_clrstall_hwep(udc, tmp);
  1930. goto zlp_send;
		default:
			break;
		}
		break;

	case USB_REQ_SET_ADDRESS:
  1935. if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
  1936. udc_set_address(udc, wValue);
  1937. goto zlp_send;
  1938. }
  1939. break;
  1940. case USB_REQ_GET_STATUS:
  1941. udc_get_status(udc, reqtype, wIndex);
  1942. return;
  1943. default:
  1944. break; /* Let GadgetFS handle the descriptor instead */
  1945. }
  1946. if (likely(udc->driver)) {
  1947. /* device-2-host (IN) or no data setup command, process
  1948. * immediately */
  1949. spin_unlock(&udc->lock);
  1950. i = udc->driver->setup(&udc->gadget, &ctrlpkt);
  1951. spin_lock(&udc->lock);
  1952. if (req == USB_REQ_SET_CONFIGURATION) {
  1953. /* Configuration is set after endpoints are realized */
  1954. if (wValue) {
  1955. /* Set configuration */
  1956. udc_set_device_configured(udc);
  1957. udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
  1958. DAT_WR_BYTE(AP_CLK |
  1959. INAK_BI | INAK_II));
  1960. } else {
  1961. /* Clear configuration */
  1962. udc_set_device_unconfigured(udc);
  1963. /* Disable NAK interrupts */
  1964. udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
  1965. DAT_WR_BYTE(AP_CLK));
  1966. }
  1967. }
  1968. if (i < 0) {
  1969. /* setup processing failed, force stall */
  1970. dev_dbg(udc->dev,
  1971. "req %02x.%02x protocol STALL; stat %d\n",
  1972. reqtype, req, i);
  1973. udc->ep0state = WAIT_FOR_SETUP;
  1974. goto stall;
  1975. }
  1976. }
  1977. if (!ep0->is_in)
  1978. udc_ep0_send_zlp(udc); /* ZLP IN packet on data phase */
  1979. return;
  1980. stall:
  1981. udc_stall_hwep(udc, EP_IN);
  1982. return;
  1983. zlp_send:
  1984. udc_ep0_send_zlp(udc);
  1985. return;
  1986. }
  1987. /* IN endpoint 0 transfer */
  1988. static void udc_handle_ep0_in(struct lpc32xx_udc *udc)
  1989. {
  1990. struct lpc32xx_ep *ep0 = &udc->ep[0];
  1991. u32 epstatus;
  1992. /* Clear EP interrupt */
  1993. epstatus = udc_clearep_getsts(udc, EP_IN);
  1994. #ifdef CONFIG_USB_GADGET_DEBUG_FILES
  1995. ep0->totalints++;
  1996. #endif
  1997. /* Stalled? Clear stall and reset buffers */
  1998. if (epstatus & EP_SEL_ST) {
  1999. udc_clrstall_hwep(udc, EP_IN);
  2000. nuke(ep0, -ECONNABORTED);
  2001. udc->ep0state = WAIT_FOR_SETUP;
  2002. return;
  2003. }
  2004. /* Is a buffer available? */
  2005. if (!(epstatus & EP_SEL_F)) {
  2006. /* Handle based on current state */
  2007. if (udc->ep0state == DATA_IN)
  2008. udc_ep0_in_req(udc);
  2009. else {
			/* Unknown state for EP0 or end of DATA IN phase */
  2011. nuke(ep0, -ECONNABORTED);
  2012. udc->ep0state = WAIT_FOR_SETUP;
  2013. }
  2014. }
  2015. }
  2016. /* OUT endpoint 0 transfer */
  2017. static void udc_handle_ep0_out(struct lpc32xx_udc *udc)
  2018. {
  2019. struct lpc32xx_ep *ep0 = &udc->ep[0];
  2020. u32 epstatus;
  2021. /* Clear EP interrupt */
  2022. epstatus = udc_clearep_getsts(udc, EP_OUT);
  2023. #ifdef CONFIG_USB_GADGET_DEBUG_FILES
  2024. ep0->totalints++;
  2025. #endif
  2026. /* Stalled? */
  2027. if (epstatus & EP_SEL_ST) {
  2028. udc_clrstall_hwep(udc, EP_OUT);
  2029. nuke(ep0, -ECONNABORTED);
  2030. udc->ep0state = WAIT_FOR_SETUP;
  2031. return;
  2032. }
  2033. /* A NAK may occur if a packet couldn't be received yet */
  2034. if (epstatus & EP_SEL_EPN)
  2035. return;
  2036. /* Setup packet incoming? */
  2037. if (epstatus & EP_SEL_STP) {
  2038. nuke(ep0, 0);
  2039. udc->ep0state = WAIT_FOR_SETUP;
  2040. }
  2041. /* Data available? */
  2042. if (epstatus & EP_SEL_F)
  2043. /* Handle based on current state */
  2044. switch (udc->ep0state) {
  2045. case WAIT_FOR_SETUP:
  2046. udc_handle_ep0_setup(udc);
  2047. break;
  2048. case DATA_OUT:
  2049. udc_ep0_out_req(udc);
  2050. break;
  2051. default:
  2052. /* Unknown state for EP0 */
  2053. nuke(ep0, -ECONNABORTED);
  2054. udc->ep0state = WAIT_FOR_SETUP;
  2055. }
  2056. }
  2057. /* Must be called without lock */
  2058. static int lpc32xx_get_frame(struct usb_gadget *gadget)
  2059. {
  2060. int frame;
  2061. unsigned long flags;
  2062. struct lpc32xx_udc *udc = to_udc(gadget);
  2063. if (!udc->clocked)
  2064. return -EINVAL;
  2065. spin_lock_irqsave(&udc->lock, flags);
  2066. frame = (int) udc_get_current_frame(udc);
  2067. spin_unlock_irqrestore(&udc->lock, flags);
  2068. return frame;
  2069. }
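
/* Remote wakeup signalling is not supported by this controller driver */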
  2070. static int lpc32xx_wakeup(struct usb_gadget *gadget)
  2071. {
  2072. return -ENOTSUPP;
  2073. }
  2074. static int lpc32xx_set_selfpowered(struct usb_gadget *gadget, int is_on)
  2075. {
  2076. gadget->is_selfpowered = (is_on != 0);
  2077. return 0;
  2078. }
  2079. /*
  2080. * vbus is here! turn everything on that's ready
  2081. * Must be called without lock
  2082. */
  2083. static int lpc32xx_vbus_session(struct usb_gadget *gadget, int is_active)
  2084. {
  2085. unsigned long flags;
  2086. struct lpc32xx_udc *udc = to_udc(gadget);
  2087. spin_lock_irqsave(&udc->lock, flags);
  2088. /* Doesn't need lock */
  2089. if (udc->driver) {
  2090. udc_clk_set(udc, 1);
  2091. udc_enable(udc);
  2092. pullup(udc, is_active);
  2093. } else {
  2094. stop_activity(udc);
  2095. pullup(udc, 0);
  2096. spin_unlock_irqrestore(&udc->lock, flags);
  2097. /*
  2098. * Wait for all the endpoints to disable,
  2099. * before disabling clocks. Don't wait if
  2100. * endpoints are not enabled.
  2101. */
  2102. if (atomic_read(&udc->enabled_ep_cnt))
  2103. wait_event_interruptible(udc->ep_disable_wait_queue,
  2104. (atomic_read(&udc->enabled_ep_cnt) == 0));
  2105. spin_lock_irqsave(&udc->lock, flags);
  2106. udc_clk_set(udc, 0);
  2107. }
  2108. spin_unlock_irqrestore(&udc->lock, flags);
  2109. return 0;
  2110. }
  2111. /* Can be called with or without lock */
  2112. static int lpc32xx_pullup(struct usb_gadget *gadget, int is_on)
  2113. {
  2114. struct lpc32xx_udc *udc = to_udc(gadget);
  2115. /* Doesn't need lock */
  2116. pullup(udc, is_on);
  2117. return 0;
  2118. }
  2119. static int lpc32xx_start(struct usb_gadget *, struct usb_gadget_driver *);
  2120. static int lpc32xx_stop(struct usb_gadget *);
  2121. static const struct usb_gadget_ops lpc32xx_udc_ops = {
  2122. .get_frame = lpc32xx_get_frame,
  2123. .wakeup = lpc32xx_wakeup,
  2124. .set_selfpowered = lpc32xx_set_selfpowered,
  2125. .vbus_session = lpc32xx_vbus_session,
  2126. .pullup = lpc32xx_pullup,
  2127. .udc_start = lpc32xx_start,
  2128. .udc_stop = lpc32xx_stop,
  2129. };
  2130. static void nop_release(struct device *dev)
  2131. {
  2132. /* nothing to free */
  2133. }
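
/*
 * Static template for the UDC state, copied with kmemdup() at probe time.
 * Each logical endpoint maps to a pair of hardware endpoints starting at
 * hwep_num_base (OUT), with the IN endpoint at hwep_num_base + 1 selected
 * when the endpoint is enabled.
 */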
  2134. static const struct lpc32xx_udc controller_template = {
  2135. .gadget = {
  2136. .ops = &lpc32xx_udc_ops,
  2137. .name = driver_name,
  2138. .dev = {
  2139. .init_name = "gadget",
  2140. .release = nop_release,
  2141. }
  2142. },
  2143. .ep[0] = {
  2144. .ep = {
  2145. .name = "ep0",
  2146. .ops = &lpc32xx_ep_ops,
  2147. },
  2148. .maxpacket = 64,
  2149. .hwep_num_base = 0,
  2150. .hwep_num = 0, /* Can be 0 or 1, has special handling */
  2151. .lep = 0,
  2152. .eptype = EP_CTL_TYPE,
  2153. },
  2154. .ep[1] = {
  2155. .ep = {
  2156. .name = "ep1-int",
  2157. .ops = &lpc32xx_ep_ops,
  2158. },
  2159. .maxpacket = 64,
  2160. .hwep_num_base = 2,
  2161. .hwep_num = 0, /* 2 or 3, will be set later */
  2162. .lep = 1,
  2163. .eptype = EP_INT_TYPE,
  2164. },
  2165. .ep[2] = {
  2166. .ep = {
  2167. .name = "ep2-bulk",
  2168. .ops = &lpc32xx_ep_ops,
  2169. },
  2170. .maxpacket = 64,
  2171. .hwep_num_base = 4,
  2172. .hwep_num = 0, /* 4 or 5, will be set later */
  2173. .lep = 2,
  2174. .eptype = EP_BLK_TYPE,
  2175. },
  2176. .ep[3] = {
  2177. .ep = {
  2178. .name = "ep3-iso",
  2179. .ops = &lpc32xx_ep_ops,
  2180. },
  2181. .maxpacket = 1023,
  2182. .hwep_num_base = 6,
  2183. .hwep_num = 0, /* 6 or 7, will be set later */
  2184. .lep = 3,
  2185. .eptype = EP_ISO_TYPE,
  2186. },
  2187. .ep[4] = {
  2188. .ep = {
  2189. .name = "ep4-int",
  2190. .ops = &lpc32xx_ep_ops,
  2191. },
  2192. .maxpacket = 64,
  2193. .hwep_num_base = 8,
  2194. .hwep_num = 0, /* 8 or 9, will be set later */
  2195. .lep = 4,
  2196. .eptype = EP_INT_TYPE,
  2197. },
  2198. .ep[5] = {
  2199. .ep = {
  2200. .name = "ep5-bulk",
  2201. .ops = &lpc32xx_ep_ops,
  2202. },
  2203. .maxpacket = 64,
  2204. .hwep_num_base = 10,
  2205. .hwep_num = 0, /* 10 or 11, will be set later */
  2206. .lep = 5,
  2207. .eptype = EP_BLK_TYPE,
  2208. },
  2209. .ep[6] = {
  2210. .ep = {
  2211. .name = "ep6-iso",
  2212. .ops = &lpc32xx_ep_ops,
  2213. },
  2214. .maxpacket = 1023,
  2215. .hwep_num_base = 12,
  2216. .hwep_num = 0, /* 12 or 13, will be set later */
  2217. .lep = 6,
  2218. .eptype = EP_ISO_TYPE,
  2219. },
  2220. .ep[7] = {
  2221. .ep = {
  2222. .name = "ep7-int",
  2223. .ops = &lpc32xx_ep_ops,
  2224. },
  2225. .maxpacket = 64,
  2226. .hwep_num_base = 14,
  2227. .hwep_num = 0,
  2228. .lep = 7,
  2229. .eptype = EP_INT_TYPE,
  2230. },
  2231. .ep[8] = {
  2232. .ep = {
  2233. .name = "ep8-bulk",
  2234. .ops = &lpc32xx_ep_ops,
  2235. },
  2236. .maxpacket = 64,
  2237. .hwep_num_base = 16,
  2238. .hwep_num = 0,
  2239. .lep = 8,
  2240. .eptype = EP_BLK_TYPE,
  2241. },
  2242. .ep[9] = {
  2243. .ep = {
  2244. .name = "ep9-iso",
  2245. .ops = &lpc32xx_ep_ops,
  2246. },
  2247. .maxpacket = 1023,
  2248. .hwep_num_base = 18,
  2249. .hwep_num = 0,
  2250. .lep = 9,
  2251. .eptype = EP_ISO_TYPE,
  2252. },
  2253. .ep[10] = {
  2254. .ep = {
  2255. .name = "ep10-int",
  2256. .ops = &lpc32xx_ep_ops,
  2257. },
  2258. .maxpacket = 64,
  2259. .hwep_num_base = 20,
  2260. .hwep_num = 0,
  2261. .lep = 10,
  2262. .eptype = EP_INT_TYPE,
  2263. },
  2264. .ep[11] = {
  2265. .ep = {
  2266. .name = "ep11-bulk",
  2267. .ops = &lpc32xx_ep_ops,
  2268. },
  2269. .maxpacket = 64,
  2270. .hwep_num_base = 22,
  2271. .hwep_num = 0,
  2272. .lep = 11,
  2273. .eptype = EP_BLK_TYPE,
  2274. },
  2275. .ep[12] = {
  2276. .ep = {
  2277. .name = "ep12-iso",
  2278. .ops = &lpc32xx_ep_ops,
  2279. },
  2280. .maxpacket = 1023,
  2281. .hwep_num_base = 24,
  2282. .hwep_num = 0,
  2283. .lep = 12,
  2284. .eptype = EP_ISO_TYPE,
  2285. },
  2286. .ep[13] = {
  2287. .ep = {
  2288. .name = "ep13-int",
  2289. .ops = &lpc32xx_ep_ops,
  2290. },
  2291. .maxpacket = 64,
  2292. .hwep_num_base = 26,
  2293. .hwep_num = 0,
  2294. .lep = 13,
  2295. .eptype = EP_INT_TYPE,
  2296. },
  2297. .ep[14] = {
  2298. .ep = {
  2299. .name = "ep14-bulk",
  2300. .ops = &lpc32xx_ep_ops,
  2301. },
  2302. .maxpacket = 64,
  2303. .hwep_num_base = 28,
  2304. .hwep_num = 0,
  2305. .lep = 14,
  2306. .eptype = EP_BLK_TYPE,
  2307. },
  2308. .ep[15] = {
  2309. .ep = {
  2310. .name = "ep15-bulk",
  2311. .ops = &lpc32xx_ep_ops,
  2312. },
  2313. .maxpacket = 1023,
  2314. .hwep_num_base = 30,
  2315. .hwep_num = 0,
  2316. .lep = 15,
  2317. .eptype = EP_BLK_TYPE,
  2318. },
  2319. };
  2320. /* ISO and status interrupts */
  2321. static irqreturn_t lpc32xx_usb_lp_irq(int irq, void *_udc)
  2322. {
  2323. u32 tmp, devstat;
  2324. struct lpc32xx_udc *udc = _udc;
  2325. spin_lock(&udc->lock);
  2326. /* Read the device status register */
  2327. devstat = readl(USBD_DEVINTST(udc->udp_baseaddr));
  2328. devstat &= ~USBD_EP_FAST;
  2329. writel(devstat, USBD_DEVINTCLR(udc->udp_baseaddr));
  2330. devstat = devstat & udc->enabled_devints;
  2331. /* Device specific handling needed? */
  2332. if (devstat & USBD_DEV_STAT)
  2333. udc_handle_dev(udc);
  2334. /* Start of frame? (devstat & FRAME_INT):
  2335. * The frame interrupt isn't really needed for ISO support,
  2336. * as the driver will queue the necessary packets */
  2337. /* Error? */
  2338. if (devstat & ERR_INT) {
		/* All types of errors, from cable removal during transfer to
		 * misc protocol and bit errors. These are mostly for just info,
		 * as the USB hardware will work around these. If these errors
		 * happen a lot, something is wrong. */
  2343. udc_protocol_cmd_w(udc, CMD_RD_ERR_STAT);
  2344. tmp = udc_protocol_cmd_r(udc, DAT_RD_ERR_STAT);
  2345. dev_dbg(udc->dev, "Device error (0x%x)!\n", tmp);
  2346. }
  2347. spin_unlock(&udc->lock);
  2348. return IRQ_HANDLED;
  2349. }
  2350. /* EP interrupts */
  2351. static irqreturn_t lpc32xx_usb_hp_irq(int irq, void *_udc)
  2352. {
  2353. u32 tmp;
  2354. struct lpc32xx_udc *udc = _udc;
  2355. spin_lock(&udc->lock);
  2356. /* Read the device status register */
  2357. writel(USBD_EP_FAST, USBD_DEVINTCLR(udc->udp_baseaddr));
  2358. /* Endpoints */
  2359. tmp = readl(USBD_EPINTST(udc->udp_baseaddr));
  2360. /* Special handling for EP0 */
  2361. if (tmp & (EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
  2362. /* Handle EP0 IN */
  2363. if (tmp & (EP_MASK_SEL(0, EP_IN)))
  2364. udc_handle_ep0_in(udc);
  2365. /* Handle EP0 OUT */
  2366. if (tmp & (EP_MASK_SEL(0, EP_OUT)))
  2367. udc_handle_ep0_out(udc);
  2368. }
  2369. /* All other EPs */
  2370. if (tmp & ~(EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
  2371. int i;
  2372. /* Handle other EP interrupts */
  2373. for (i = 1; i < NUM_ENDPOINTS; i++) {
  2374. if (tmp & (1 << udc->ep[i].hwep_num))
  2375. udc_handle_eps(udc, &udc->ep[i]);
  2376. }
  2377. }
  2378. spin_unlock(&udc->lock);
  2379. return IRQ_HANDLED;
  2380. }
  2381. static irqreturn_t lpc32xx_usb_devdma_irq(int irq, void *_udc)
  2382. {
  2383. struct lpc32xx_udc *udc = _udc;
  2384. int i;
  2385. u32 tmp;
  2386. spin_lock(&udc->lock);
  2387. /* Handle EP DMA EOT interrupts */
  2388. tmp = readl(USBD_EOTINTST(udc->udp_baseaddr)) |
  2389. (readl(USBD_EPDMAST(udc->udp_baseaddr)) &
  2390. readl(USBD_NDDRTINTST(udc->udp_baseaddr))) |
  2391. readl(USBD_SYSERRTINTST(udc->udp_baseaddr));
  2392. for (i = 1; i < NUM_ENDPOINTS; i++) {
  2393. if (tmp & (1 << udc->ep[i].hwep_num))
  2394. udc_handle_dma_ep(udc, &udc->ep[i]);
  2395. }
  2396. spin_unlock(&udc->lock);
  2397. return IRQ_HANDLED;
  2398. }
  2399. /*
  2400. *
  2401. * VBUS detection, pullup handler, and Gadget cable state notification
  2402. *
  2403. */
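
/*
 * Deferred VBUS handling: discharge VBUS through the ISP1301, sample the
 * session-valid bit over I2C and, if the cable state changed, start or stop
 * the gadget session before re-enabling the transceiver interrupt.
 */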
  2404. static void vbus_work(struct work_struct *work)
  2405. {
  2406. u8 value;
  2407. struct lpc32xx_udc *udc = container_of(work, struct lpc32xx_udc,
  2408. vbus_job);
  2409. if (udc->enabled != 0) {
  2410. /* Discharge VBUS real quick */
  2411. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  2412. ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
		/* Give VBUS some time (100 ms) to discharge */
  2414. msleep(100);
  2415. /* Disable VBUS discharge resistor */
  2416. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  2417. ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
  2418. OTG1_VBUS_DISCHRG);
  2419. /* Clear interrupt */
  2420. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  2421. ISP1301_I2C_INTERRUPT_LATCH |
  2422. ISP1301_I2C_REG_CLEAR_ADDR, ~0);
  2423. /* Get the VBUS status from the transceiver */
  2424. value = i2c_smbus_read_byte_data(udc->isp1301_i2c_client,
  2425. ISP1301_I2C_INTERRUPT_SOURCE);
  2426. /* VBUS on or off? */
  2427. if (value & INT_SESS_VLD)
  2428. udc->vbus = 1;
  2429. else
  2430. udc->vbus = 0;
  2431. /* VBUS changed? */
  2432. if (udc->last_vbus != udc->vbus) {
  2433. udc->last_vbus = udc->vbus;
  2434. lpc32xx_vbus_session(&udc->gadget, udc->vbus);
  2435. }
  2436. }
  2437. /* Re-enable after completion */
  2438. enable_irq(udc->udp_irq[IRQ_USB_ATX]);
  2439. }
  2440. static irqreturn_t lpc32xx_usb_vbus_irq(int irq, void *_udc)
  2441. {
  2442. struct lpc32xx_udc *udc = _udc;
  2443. /* Defer handling of VBUS IRQ to work queue */
  2444. disable_irq_nosync(udc->udp_irq[IRQ_USB_ATX]);
  2445. schedule_work(&udc->vbus_job);
  2446. return IRQ_HANDLED;
  2447. }
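
/* Bind a gadget driver: record it, force one VBUS check, and unmask the USB
 * device interrupts (the transceiver IRQ is re-enabled from vbus_work) */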
  2448. static int lpc32xx_start(struct usb_gadget *gadget,
  2449. struct usb_gadget_driver *driver)
  2450. {
  2451. struct lpc32xx_udc *udc = to_udc(gadget);
  2452. int i;
  2453. if (!driver || driver->max_speed < USB_SPEED_FULL || !driver->setup) {
  2454. dev_err(udc->dev, "bad parameter.\n");
  2455. return -EINVAL;
  2456. }
  2457. if (udc->driver) {
  2458. dev_err(udc->dev, "UDC already has a gadget driver\n");
  2459. return -EBUSY;
  2460. }
  2461. udc->driver = driver;
  2462. udc->gadget.dev.of_node = udc->dev->of_node;
  2463. udc->enabled = 1;
  2464. udc->gadget.is_selfpowered = 1;
  2465. udc->vbus = 0;
  2466. /* Force VBUS process once to check for cable insertion */
  2467. udc->last_vbus = udc->vbus = 0;
  2468. schedule_work(&udc->vbus_job);
  2469. /* Do not re-enable ATX IRQ (3) */
  2470. for (i = IRQ_USB_LP; i < IRQ_USB_ATX; i++)
  2471. enable_irq(udc->udp_irq[i]);
  2472. return 0;
  2473. }
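
/* Unbind the gadget driver: mask the USB interrupts, stop any activity,
 * wait for all endpoints to be disabled, and gate the device clock */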
  2474. static int lpc32xx_stop(struct usb_gadget *gadget)
  2475. {
  2476. int i;
  2477. struct lpc32xx_udc *udc = to_udc(gadget);
  2478. for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
  2479. disable_irq(udc->udp_irq[i]);
  2480. if (udc->clocked) {
  2481. spin_lock(&udc->lock);
  2482. stop_activity(udc);
  2483. spin_unlock(&udc->lock);
  2484. /*
  2485. * Wait for all the endpoints to disable,
  2486. * before disabling clocks. Don't wait if
  2487. * endpoints are not enabled.
  2488. */
  2489. if (atomic_read(&udc->enabled_ep_cnt))
  2490. wait_event_interruptible(udc->ep_disable_wait_queue,
  2491. (atomic_read(&udc->enabled_ep_cnt) == 0));
  2492. spin_lock(&udc->lock);
  2493. udc_clk_set(udc, 0);
  2494. spin_unlock(&udc->lock);
  2495. }
  2496. udc->enabled = 0;
  2497. udc->driver = NULL;
  2498. return 0;
  2499. }
  2500. static void lpc32xx_udc_shutdown(struct platform_device *dev)
  2501. {
  2502. /* Force disconnect on reboot */
  2503. struct lpc32xx_udc *udc = platform_get_drvdata(dev);
  2504. pullup(udc, 0);
  2505. }
  2506. /*
  2507. * Callbacks to be overridden by options passed via OF (TODO)
  2508. */
  2509. static void lpc32xx_usbd_conn_chg(int conn)
  2510. {
  2511. /* Do nothing, it might be nice to enable an LED
  2512. * based on conn state being !0 */
  2513. }
  2514. static void lpc32xx_usbd_susp_chg(int susp)
  2515. {
  2516. /* Device suspend if susp != 0 */
  2517. }
  2518. static void lpc32xx_rmwkup_chg(int remote_wakup_enable)
  2519. {
  2520. /* Enable or disable USB remote wakeup */
  2521. }
  2522. struct lpc32xx_usbd_cfg lpc32xx_usbddata = {
  2523. .vbus_drv_pol = 0,
  2524. .conn_chgb = &lpc32xx_usbd_conn_chg,
  2525. .susp_chgb = &lpc32xx_usbd_susp_chg,
  2526. .rmwk_chgb = &lpc32xx_rmwkup_chg,
  2527. };
  2528. static u64 lpc32xx_usbd_dmamask = ~(u32) 0x7F;
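
/*
 * Probe: map the USB register block, claim the four device IRQs, enable the
 * USB PLL and device/OTG clocks, configure the ISP1301 transceiver, allocate
 * the UDCA and DMA descriptor pool, and register the gadget controller.
 */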
  2529. static int lpc32xx_udc_probe(struct platform_device *pdev)
  2530. {
  2531. struct device *dev = &pdev->dev;
  2532. struct lpc32xx_udc *udc;
  2533. int retval, i;
  2534. struct resource *res;
  2535. dma_addr_t dma_handle;
  2536. struct device_node *isp1301_node;
  2537. udc = kmemdup(&controller_template, sizeof(*udc), GFP_KERNEL);
  2538. if (!udc)
  2539. return -ENOMEM;
  2540. for (i = 0; i <= 15; i++)
  2541. udc->ep[i].udc = udc;
  2542. udc->gadget.ep0 = &udc->ep[0].ep;
  2543. /* init software state */
  2544. udc->gadget.dev.parent = dev;
  2545. udc->pdev = pdev;
  2546. udc->dev = &pdev->dev;
  2547. udc->enabled = 0;
  2548. if (pdev->dev.of_node) {
  2549. isp1301_node = of_parse_phandle(pdev->dev.of_node,
  2550. "transceiver", 0);
  2551. } else {
  2552. isp1301_node = NULL;
  2553. }
  2554. udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
  2555. if (!udc->isp1301_i2c_client) {
  2556. retval = -EPROBE_DEFER;
  2557. goto phy_fail;
  2558. }
  2559. dev_info(udc->dev, "ISP1301 I2C device at address 0x%x\n",
  2560. udc->isp1301_i2c_client->addr);
  2561. pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
  2562. retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
  2563. if (retval)
  2564. goto resource_fail;
  2565. udc->board = &lpc32xx_usbddata;
  2566. /*
  2567. * Resources are mapped as follows:
  2568. * IORESOURCE_MEM, base address and size of USB space
  2569. * IORESOURCE_IRQ, USB device low priority interrupt number
  2570. * IORESOURCE_IRQ, USB device high priority interrupt number
  2571. * IORESOURCE_IRQ, USB device interrupt number
  2572. * IORESOURCE_IRQ, USB transceiver interrupt number
  2573. */
  2574. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2575. if (!res) {
  2576. retval = -ENXIO;
  2577. goto resource_fail;
  2578. }
  2579. spin_lock_init(&udc->lock);
  2580. /* Get IRQs */
  2581. for (i = 0; i < 4; i++) {
  2582. udc->udp_irq[i] = platform_get_irq(pdev, i);
  2583. if (udc->udp_irq[i] < 0) {
  2584. dev_err(udc->dev,
  2585. "irq resource %d not available!\n", i);
  2586. retval = udc->udp_irq[i];
  2587. goto irq_fail;
  2588. }
  2589. }
  2590. udc->io_p_start = res->start;
  2591. udc->io_p_size = resource_size(res);
  2592. if (!request_mem_region(udc->io_p_start, udc->io_p_size, driver_name)) {
  2593. dev_err(udc->dev, "someone's using UDC memory\n");
  2594. retval = -EBUSY;
  2595. goto request_mem_region_fail;
  2596. }
  2597. udc->udp_baseaddr = ioremap(udc->io_p_start, udc->io_p_size);
  2598. if (!udc->udp_baseaddr) {
  2599. retval = -ENOMEM;
  2600. dev_err(udc->dev, "IO map failure\n");
  2601. goto io_map_fail;
  2602. }
  2603. /* Enable AHB slave USB clock, needed for further USB clock control */
  2604. writel(USB_SLAVE_HCLK_EN | (1 << 19), USB_CTRL);
  2605. /* Get required clocks */
  2606. udc->usb_pll_clk = clk_get(&pdev->dev, "ck_pll5");
  2607. if (IS_ERR(udc->usb_pll_clk)) {
  2608. dev_err(udc->dev, "failed to acquire USB PLL\n");
  2609. retval = PTR_ERR(udc->usb_pll_clk);
  2610. goto pll_get_fail;
  2611. }
  2612. udc->usb_slv_clk = clk_get(&pdev->dev, "ck_usbd");
  2613. if (IS_ERR(udc->usb_slv_clk)) {
  2614. dev_err(udc->dev, "failed to acquire USB device clock\n");
  2615. retval = PTR_ERR(udc->usb_slv_clk);
  2616. goto usb_clk_get_fail;
  2617. }
  2618. udc->usb_otg_clk = clk_get(&pdev->dev, "ck_usb_otg");
  2619. if (IS_ERR(udc->usb_otg_clk)) {
  2620. dev_err(udc->dev, "failed to acquire USB otg clock\n");
  2621. retval = PTR_ERR(udc->usb_otg_clk);
  2622. goto usb_otg_clk_get_fail;
  2623. }
  2624. /* Setup PLL clock to 48MHz */
  2625. retval = clk_enable(udc->usb_pll_clk);
  2626. if (retval < 0) {
  2627. dev_err(udc->dev, "failed to start USB PLL\n");
  2628. goto pll_enable_fail;
  2629. }
  2630. retval = clk_set_rate(udc->usb_pll_clk, 48000);
  2631. if (retval < 0) {
  2632. dev_err(udc->dev, "failed to set USB clock rate\n");
  2633. goto pll_set_fail;
  2634. }
  2635. writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN, USB_CTRL);
  2636. /* Enable USB device clock */
  2637. retval = clk_enable(udc->usb_slv_clk);
  2638. if (retval < 0) {
  2639. dev_err(udc->dev, "failed to start USB device clock\n");
  2640. goto usb_clk_enable_fail;
  2641. }
  2642. /* Enable USB OTG clock */
  2643. retval = clk_enable(udc->usb_otg_clk);
  2644. if (retval < 0) {
  2645. dev_err(udc->dev, "failed to start USB otg clock\n");
  2646. goto usb_otg_clk_enable_fail;
  2647. }
  2648. /* Setup deferred workqueue data */
  2649. udc->poweron = udc->pullup = 0;
  2650. INIT_WORK(&udc->pullup_job, pullup_work);
  2651. INIT_WORK(&udc->vbus_job, vbus_work);
  2652. #ifdef CONFIG_PM
  2653. INIT_WORK(&udc->power_job, power_work);
  2654. #endif
  2655. /* All clocks are now on */
  2656. udc->clocked = 1;
  2657. isp1301_udc_configure(udc);
  2658. /* Allocate memory for the UDCA */
  2659. udc->udca_v_base = dma_alloc_coherent(&pdev->dev, UDCA_BUFF_SIZE,
  2660. &dma_handle,
  2661. (GFP_KERNEL | GFP_DMA));
  2662. if (!udc->udca_v_base) {
  2663. dev_err(udc->dev, "error getting UDCA region\n");
  2664. retval = -ENOMEM;
  2665. goto i2c_fail;
  2666. }
  2667. udc->udca_p_base = dma_handle;
  2668. dev_dbg(udc->dev, "DMA buffer(0x%x bytes), P:0x%08x, V:0x%p\n",
  2669. UDCA_BUFF_SIZE, udc->udca_p_base, udc->udca_v_base);
  2670. /* Setup the DD DMA memory pool */
  2671. udc->dd_cache = dma_pool_create("udc_dd", udc->dev,
  2672. sizeof(struct lpc32xx_usbd_dd_gad),
  2673. sizeof(u32), 0);
  2674. if (!udc->dd_cache) {
  2675. dev_err(udc->dev, "error getting DD DMA region\n");
  2676. retval = -ENOMEM;
  2677. goto dma_alloc_fail;
  2678. }
  2679. /* Clear USB peripheral and initialize gadget endpoints */
  2680. udc_disable(udc);
  2681. udc_reinit(udc);
  2682. /* Request IRQs - low and high priority USB device IRQs are routed to
  2683. * the same handler, while the DMA interrupt is routed elsewhere */
  2684. retval = request_irq(udc->udp_irq[IRQ_USB_LP], lpc32xx_usb_lp_irq,
  2685. 0, "udc_lp", udc);
  2686. if (retval < 0) {
  2687. dev_err(udc->dev, "LP request irq %d failed\n",
  2688. udc->udp_irq[IRQ_USB_LP]);
  2689. goto irq_lp_fail;
  2690. }
  2691. retval = request_irq(udc->udp_irq[IRQ_USB_HP], lpc32xx_usb_hp_irq,
  2692. 0, "udc_hp", udc);
  2693. if (retval < 0) {
  2694. dev_err(udc->dev, "HP request irq %d failed\n",
  2695. udc->udp_irq[IRQ_USB_HP]);
  2696. goto irq_hp_fail;
  2697. }
  2698. retval = request_irq(udc->udp_irq[IRQ_USB_DEVDMA],
  2699. lpc32xx_usb_devdma_irq, 0, "udc_dma", udc);
  2700. if (retval < 0) {
  2701. dev_err(udc->dev, "DEV request irq %d failed\n",
  2702. udc->udp_irq[IRQ_USB_DEVDMA]);
  2703. goto irq_dev_fail;
  2704. }
  2705. /* The transceiver interrupt is used for VBUS detection and will
  2706. kick off the VBUS handler function */
  2707. retval = request_irq(udc->udp_irq[IRQ_USB_ATX], lpc32xx_usb_vbus_irq,
  2708. 0, "udc_otg", udc);
  2709. if (retval < 0) {
  2710. dev_err(udc->dev, "VBUS request irq %d failed\n",
  2711. udc->udp_irq[IRQ_USB_ATX]);
  2712. goto irq_xcvr_fail;
  2713. }
  2714. /* Initialize wait queue */
  2715. init_waitqueue_head(&udc->ep_disable_wait_queue);
  2716. atomic_set(&udc->enabled_ep_cnt, 0);
  2717. /* Keep all IRQs disabled until GadgetFS starts up */
  2718. for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
  2719. disable_irq(udc->udp_irq[i]);
  2720. retval = usb_add_gadget_udc(dev, &udc->gadget);
  2721. if (retval < 0)
  2722. goto add_gadget_fail;
  2723. dev_set_drvdata(dev, udc);
  2724. device_init_wakeup(dev, 1);
  2725. create_debug_file(udc);
  2726. /* Disable clocks for now */
  2727. udc_clk_set(udc, 0);
  2728. dev_info(udc->dev, "%s version %s\n", driver_name, DRIVER_VERSION);
  2729. return 0;
  2730. add_gadget_fail:
  2731. free_irq(udc->udp_irq[IRQ_USB_ATX], udc);
  2732. irq_xcvr_fail:
  2733. free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc);
  2734. irq_dev_fail:
  2735. free_irq(udc->udp_irq[IRQ_USB_HP], udc);
  2736. irq_hp_fail:
  2737. free_irq(udc->udp_irq[IRQ_USB_LP], udc);
  2738. irq_lp_fail:
  2739. dma_pool_destroy(udc->dd_cache);
  2740. dma_alloc_fail:
  2741. dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
  2742. udc->udca_v_base, udc->udca_p_base);
  2743. i2c_fail:
  2744. clk_disable(udc->usb_otg_clk);
  2745. usb_otg_clk_enable_fail:
  2746. clk_disable(udc->usb_slv_clk);
  2747. usb_clk_enable_fail:
  2748. pll_set_fail:
  2749. clk_disable(udc->usb_pll_clk);
  2750. pll_enable_fail:
  2751. clk_put(udc->usb_otg_clk);
  2752. usb_otg_clk_get_fail:
  2753. clk_put(udc->usb_slv_clk);
  2754. usb_clk_get_fail:
  2755. clk_put(udc->usb_pll_clk);
  2756. pll_get_fail:
  2757. iounmap(udc->udp_baseaddr);
  2758. io_map_fail:
  2759. release_mem_region(udc->io_p_start, udc->io_p_size);
  2760. dev_err(udc->dev, "%s probe failed, %d\n", driver_name, retval);
  2761. request_mem_region_fail:
  2762. irq_fail:
  2763. resource_fail:
  2764. phy_fail:
  2765. kfree(udc);
  2766. return retval;
  2767. }
  2768. static int lpc32xx_udc_remove(struct platform_device *pdev)
  2769. {
  2770. struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
  2771. usb_del_gadget_udc(&udc->gadget);
  2772. if (udc->driver)
  2773. return -EBUSY;
  2774. udc_clk_set(udc, 1);
  2775. udc_disable(udc);
  2776. pullup(udc, 0);
  2777. free_irq(udc->udp_irq[IRQ_USB_ATX], udc);
  2778. device_init_wakeup(&pdev->dev, 0);
  2779. remove_debug_file(udc);
  2780. dma_pool_destroy(udc->dd_cache);
  2781. dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
  2782. udc->udca_v_base, udc->udca_p_base);
  2783. free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc);
  2784. free_irq(udc->udp_irq[IRQ_USB_HP], udc);
  2785. free_irq(udc->udp_irq[IRQ_USB_LP], udc);
  2786. clk_disable(udc->usb_otg_clk);
  2787. clk_put(udc->usb_otg_clk);
  2788. clk_disable(udc->usb_slv_clk);
  2789. clk_put(udc->usb_slv_clk);
  2790. clk_disable(udc->usb_pll_clk);
  2791. clk_put(udc->usb_pll_clk);
  2792. iounmap(udc->udp_baseaddr);
  2793. release_mem_region(udc->io_p_start, udc->io_p_size);
  2794. kfree(udc);
  2795. return 0;
  2796. }
  2797. #ifdef CONFIG_PM
  2798. static int lpc32xx_udc_suspend(struct platform_device *pdev, pm_message_t mesg)
  2799. {
  2800. struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
  2801. if (udc->clocked) {
  2802. /* Power down ISP */
  2803. udc->poweron = 0;
  2804. isp1301_set_powerstate(udc, 0);
  2805. /* Disable clocking */
  2806. udc_clk_set(udc, 0);
  2807. /* Keep clock flag on, so we know to re-enable clocks
  2808. on resume */
  2809. udc->clocked = 1;
  2810. /* Kill global USB clock */
  2811. clk_disable(udc->usb_slv_clk);
  2812. }
  2813. return 0;
  2814. }
  2815. static int lpc32xx_udc_resume(struct platform_device *pdev)
  2816. {
  2817. struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
  2818. if (udc->clocked) {
  2819. /* Enable global USB clock */
  2820. clk_enable(udc->usb_slv_clk);
  2821. /* Enable clocking */
  2822. udc_clk_set(udc, 1);
  2823. /* ISP back to normal power mode */
  2824. udc->poweron = 1;
  2825. isp1301_set_powerstate(udc, 1);
  2826. }
  2827. return 0;
  2828. }
  2829. #else
  2830. #define lpc32xx_udc_suspend NULL
  2831. #define lpc32xx_udc_resume NULL
  2832. #endif
  2833. #ifdef CONFIG_OF
  2834. static const struct of_device_id lpc32xx_udc_of_match[] = {
  2835. { .compatible = "nxp,lpc3220-udc", },
  2836. { },
  2837. };
  2838. MODULE_DEVICE_TABLE(of, lpc32xx_udc_of_match);
  2839. #endif
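
/* .probe is not set here; lpc32xx_udc_probe is registered through
 * module_platform_driver_probe() below */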
  2840. static struct platform_driver lpc32xx_udc_driver = {
  2841. .remove = lpc32xx_udc_remove,
  2842. .shutdown = lpc32xx_udc_shutdown,
  2843. .suspend = lpc32xx_udc_suspend,
  2844. .resume = lpc32xx_udc_resume,
  2845. .driver = {
  2846. .name = (char *) driver_name,
  2847. .of_match_table = of_match_ptr(lpc32xx_udc_of_match),
  2848. },
  2849. };
  2850. module_platform_driver_probe(lpc32xx_udc_driver, lpc32xx_udc_probe);
  2851. MODULE_DESCRIPTION("LPC32XX udc driver");
  2852. MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
  2853. MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
  2854. MODULE_LICENSE("GPL");
  2855. MODULE_ALIAS("platform:lpc32xx_udc");