/*
 * USB Gadget driver for LPC32xx
 *
 * Authors:
 *    Kevin Wells <kevin.wells@nxp.com>
 *    Mike James
 *    Roland Stigge <stigge@antcom.de>
 *
 * Copyright (C) 2006 Philips Semiconductors
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2012 Roland Stigge
 *
 * Note: This driver is based on original work done by Mike James for
 * the LPC3180.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/clk.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/i2c.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/usb/isp1301.h>
#include <asm/byteorder.h>
#include <mach/hardware.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <mach/platform.h>
#include <mach/irqs.h>
#include <mach/board.h>

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif
/*
 * USB device configuration structure
 */
typedef void (*usc_chg_event)(int);
struct lpc32xx_usbd_cfg {
	int vbus_drv_pol;	 /* 0=active low drive for VBUS via ISP1301 */
	usc_chg_event conn_chgb; /* Connection change event (optional) */
	usc_chg_event susp_chgb; /* Suspend/resume event (optional) */
	usc_chg_event rmwk_chgb; /* Enable/disable remote wakeup */
};

/*
 * controller driver data structures
 */

/* 16 endpoints (not to be confused with 32 hardware endpoints) */
#define NUM_ENDPOINTS 16

/*
 * IRQ indices make reading the code a little easier
 */
#define IRQ_USB_LP	0
#define IRQ_USB_HP	1
#define IRQ_USB_DEVDMA	2
#define IRQ_USB_ATX	3

#define EP_OUT 0 /* RX (from host) */
#define EP_IN 1 /* TX (to host) */

/* Returns the interrupt mask for the selected hardware endpoint */
#define EP_MASK_SEL(ep, dir) (1 << (((ep) * 2) + dir))
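
/*
 * Illustrative sketch (not part of the original driver): EP_MASK_SEL() maps a
 * logical endpoint and direction onto its hardware endpoint interrupt bit,
 * assuming the usual pairing of OUT on even and IN on odd hardware endpoints.
 * For example, EP_MASK_SEL(1, EP_IN) evaluates to (1 << 3), i.e. hardware
 * endpoint 3 (logical EP 1, IN direction).
 */
static inline u32 example_ep_irq_mask(u32 log_ep, bool is_in)
{
	return EP_MASK_SEL(log_ep, is_in ? EP_IN : EP_OUT);
}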
#define EP_INT_TYPE 0
#define EP_ISO_TYPE 1
#define EP_BLK_TYPE 2
#define EP_CTL_TYPE 3

/* EP0 states */
#define WAIT_FOR_SETUP 0 /* Wait for setup packet */
#define DATA_IN        1 /* Expect dev->host transfer */
#define DATA_OUT       2 /* Expect host->dev transfer */

/* DD (DMA Descriptor) structure, requires word alignment, this is already
 * defined in the LPC32XX USB device header file, but this version is slightly
 * modified to tag some work data with each DMA descriptor. */
struct lpc32xx_usbd_dd_gad {
	u32 dd_next_phy;
	u32 dd_setup;
	u32 dd_buffer_addr;
	u32 dd_status;
	u32 dd_iso_ps_mem_addr;
	u32 this_dma;
	u32 iso_status[6]; /* 5 spare */
	u32 dd_next_v;
};

/*
 * Logical endpoint structure
 */
struct lpc32xx_ep {
	struct usb_ep ep;
	struct list_head queue;
	struct lpc32xx_udc *udc;
	u32 hwep_num_base;	/* Physical hardware EP */
	u32 hwep_num;		/* Maps to hardware endpoint */
	u32 maxpacket;
	u32 lep;
	bool is_in;
	bool req_pending;
	u32 eptype;
	u32 totalints;
	bool wedge;
};

/*
 * Common UDC structure
 */
struct lpc32xx_udc {
	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;
	struct platform_device *pdev;
	struct device *dev;
	struct dentry *pde;
	spinlock_t lock;
	struct i2c_client *isp1301_i2c_client;

	/* Board and device specific */
	struct lpc32xx_usbd_cfg *board;
	u32 io_p_start;
	u32 io_p_size;
	void __iomem *udp_baseaddr;
	int udp_irq[4];
	struct clk *usb_pll_clk;
	struct clk *usb_slv_clk;
	struct clk *usb_otg_clk;

	/* DMA support */
	u32 *udca_v_base;
	u32 udca_p_base;
	struct dma_pool *dd_cache;

	/* Common EP and control data */
	u32 enabled_devints;
	u32 enabled_hwepints;
	u32 dev_status;
	u32 realized_eps;

	/* VBUS detection, pullup, and power flags */
	u8 vbus;
	u8 last_vbus;
	int pullup;
	int poweron;

	/* Work queues related to I2C support */
	struct work_struct pullup_job;
	struct work_struct vbus_job;
	struct work_struct power_job;

	/* USB device peripheral - various */
	struct lpc32xx_ep ep[NUM_ENDPOINTS];
	bool enabled;
	bool clocked;
	bool suspended;
	bool selfpowered;
	int ep0state;
	atomic_t enabled_ep_cnt;
	wait_queue_head_t ep_disable_wait_queue;
};

/*
 * Endpoint request
 */
struct lpc32xx_request {
	struct usb_request req;
	struct list_head queue;
	struct lpc32xx_usbd_dd_gad *dd_desc_ptr;
	bool mapped;
	bool send_zlp;
};

static inline struct lpc32xx_udc *to_udc(struct usb_gadget *g)
{
	return container_of(g, struct lpc32xx_udc, gadget);
}

#define ep_dbg(epp, fmt, arg...) \
	dev_dbg(epp->udc->dev, "%s: " fmt, __func__, ## arg)
#define ep_err(epp, fmt, arg...) \
	dev_err(epp->udc->dev, "%s: " fmt, __func__, ## arg)
#define ep_info(epp, fmt, arg...) \
	dev_info(epp->udc->dev, "%s: " fmt, __func__, ## arg)
#define ep_warn(epp, fmt, arg...) \
	dev_warn(epp->udc->dev, "%s: " fmt, __func__, ## arg)

#define UDCA_BUFF_SIZE (128)
/* TODO: When the clock framework is introduced in LPC32xx, IO_ADDRESS will
 * be replaced with an ioremap()ed pointer
 */
#define USB_CTRL	IO_ADDRESS(LPC32XX_CLK_PM_BASE + 0x64)

/* USB_CTRL bit defines */
#define USB_SLAVE_HCLK_EN	(1 << 24)
#define USB_HOST_NEED_CLK_EN	(1 << 21)
#define USB_DEV_NEED_CLK_EN	(1 << 22)

/**********************************************************************
 * USB device controller register offsets
 **********************************************************************/
#define USBD_DEVINTST(x)	((x) + 0x200)
#define USBD_DEVINTEN(x)	((x) + 0x204)
#define USBD_DEVINTCLR(x)	((x) + 0x208)
#define USBD_DEVINTSET(x)	((x) + 0x20C)
#define USBD_CMDCODE(x)		((x) + 0x210)
#define USBD_CMDDATA(x)		((x) + 0x214)
#define USBD_RXDATA(x)		((x) + 0x218)
#define USBD_TXDATA(x)		((x) + 0x21C)
#define USBD_RXPLEN(x)		((x) + 0x220)
#define USBD_TXPLEN(x)		((x) + 0x224)
#define USBD_CTRL(x)		((x) + 0x228)
#define USBD_DEVINTPRI(x)	((x) + 0x22C)
#define USBD_EPINTST(x)		((x) + 0x230)
#define USBD_EPINTEN(x)		((x) + 0x234)
#define USBD_EPINTCLR(x)	((x) + 0x238)
#define USBD_EPINTSET(x)	((x) + 0x23C)
#define USBD_EPINTPRI(x)	((x) + 0x240)
#define USBD_REEP(x)		((x) + 0x244)
#define USBD_EPIND(x)		((x) + 0x248)
#define USBD_EPMAXPSIZE(x)	((x) + 0x24C)

/* DMA support registers only below */
/* Set, clear, or get enabled state of the DMA request status. If
 * enabled, an IN or OUT token will start a DMA transfer for the EP */
#define USBD_DMARST(x)		((x) + 0x250)
#define USBD_DMARCLR(x)		((x) + 0x254)
#define USBD_DMARSET(x)		((x) + 0x258)

/* DMA UDCA head pointer */
#define USBD_UDCAH(x)		((x) + 0x280)

/* EP DMA status, enable, and disable. This is used to specifically
 * enable or disable DMA for a specific EP */
#define USBD_EPDMAST(x)		((x) + 0x284)
#define USBD_EPDMAEN(x)		((x) + 0x288)
#define USBD_EPDMADIS(x)	((x) + 0x28C)

/* DMA master interrupts enable and pending interrupts */
#define USBD_DMAINTST(x)	((x) + 0x290)
#define USBD_DMAINTEN(x)	((x) + 0x294)

/* DMA end of transfer interrupt enable, disable, status */
#define USBD_EOTINTST(x)	((x) + 0x2A0)
#define USBD_EOTINTCLR(x)	((x) + 0x2A4)
#define USBD_EOTINTSET(x)	((x) + 0x2A8)

/* New DD request interrupt enable, disable, status */
#define USBD_NDDRTINTST(x)	((x) + 0x2AC)
#define USBD_NDDRTINTCLR(x)	((x) + 0x2B0)
#define USBD_NDDRTINTSET(x)	((x) + 0x2B4)

/* DMA error interrupt enable, disable, status */
#define USBD_SYSERRTINTST(x)	((x) + 0x2B8)
#define USBD_SYSERRTINTCLR(x)	((x) + 0x2BC)
#define USBD_SYSERRTINTSET(x)	((x) + 0x2C0)

/**********************************************************************
 * USBD_DEVINTST/USBD_DEVINTEN/USBD_DEVINTCLR/USBD_DEVINTSET/
 * USBD_DEVINTPRI register definitions
 **********************************************************************/
#define USBD_ERR_INT	(1 << 9)
#define USBD_EP_RLZED	(1 << 8)
#define USBD_TXENDPKT	(1 << 7)
#define USBD_RXENDPKT	(1 << 6)
#define USBD_CDFULL	(1 << 5)
#define USBD_CCEMPTY	(1 << 4)
#define USBD_DEV_STAT	(1 << 3)
#define USBD_EP_SLOW	(1 << 2)
#define USBD_EP_FAST	(1 << 1)
#define USBD_FRAME	(1 << 0)

/**********************************************************************
 * USBD_EPINTST/USBD_EPINTEN/USBD_EPINTCLR/USBD_EPINTSET/
 * USBD_EPINTPRI register definitions
 **********************************************************************/
/* End point selection macro (RX) */
#define USBD_RX_EP_SEL(e)	(1 << ((e) << 1))

/* End point selection macro (TX) */
#define USBD_TX_EP_SEL(e)	(1 << (((e) << 1) + 1))

/**********************************************************************
 * USBD_REEP/USBD_DMARST/USBD_DMARCLR/USBD_DMARSET/USBD_EPDMAST/
 * USBD_EPDMAEN/USBD_EPDMADIS/
 * USBD_NDDRTINTST/USBD_NDDRTINTCLR/USBD_NDDRTINTSET/
 * USBD_EOTINTST/USBD_EOTINTCLR/USBD_EOTINTSET/
 * USBD_SYSERRTINTST/USBD_SYSERRTINTCLR/USBD_SYSERRTINTSET
 * register definitions
 **********************************************************************/
/* Endpoint selection macro */
#define USBD_EP_SEL(e)		(1 << (e))
/**********************************************************************
 * USBD_DMAINTST/USBD_DMAINTEN register definitions
 **********************************************************************/
#define USBD_SYS_ERR_INT	(1 << 2)
#define USBD_NEW_DD_INT		(1 << 1)
#define USBD_EOT_INT		(1 << 0)

/**********************************************************************
 * USBD_RXPLEN register definitions
 **********************************************************************/
#define USBD_PKT_RDY		(1 << 11)
#define USBD_DV			(1 << 10)
#define USBD_PK_LEN_MASK	0x3FF

/**********************************************************************
 * USBD_CTRL register definitions
 **********************************************************************/
#define USBD_LOG_ENDPOINT(e)	((e) << 2)
#define USBD_WR_EN		(1 << 1)
#define USBD_RD_EN		(1 << 0)

/**********************************************************************
 * USBD_CMDCODE register definitions
 **********************************************************************/
#define USBD_CMD_CODE(c)	((c) << 16)
#define USBD_CMD_PHASE(p)	((p) << 8)

/**********************************************************************
 * USBD_DMARST/USBD_DMARCLR/USBD_DMARSET register definitions
 **********************************************************************/
#define USBD_DMAEP(e)		(1 << (e))

/* DD (DMA Descriptor) structure, requires word alignment */
struct lpc32xx_usbd_dd {
	u32 *dd_next;
	u32 dd_setup;
	u32 dd_buffer_addr;
	u32 dd_status;
	u32 dd_iso_ps_mem_addr;
};

/* dd_setup bit defines */
#define DD_SETUP_ATLE_DMA_MODE	0x01
#define DD_SETUP_NEXT_DD_VALID	0x04
#define DD_SETUP_ISO_EP		0x10
#define DD_SETUP_PACKETLEN(n)	(((n) & 0x7FF) << 5)
#define DD_SETUP_DMALENBYTES(n)	(((n) & 0xFFFF) << 16)

/* dd_status bit defines */
#define DD_STATUS_DD_RETIRED	0x01
#define DD_STATUS_STS_MASK	0x1E
#define DD_STATUS_STS_NS	0x00 /* Not serviced */
#define DD_STATUS_STS_BS	0x02 /* Being serviced */
#define DD_STATUS_STS_NC	0x04 /* Normal completion */
#define DD_STATUS_STS_DUR	0x06 /* Data underrun (short packet) */
#define DD_STATUS_STS_DOR	0x08 /* Data overrun */
#define DD_STATUS_STS_SE	0x12 /* System error */
#define DD_STATUS_PKT_VAL	0x20 /* Packet valid */
#define DD_STATUS_LSB_EX	0x40 /* LS byte extracted (ATLE) */
#define DD_STATUS_MSB_EX	0x80 /* MS byte extracted (ATLE) */
#define DD_STATUS_MLEN(n)	(((n) >> 8) & 0x3F)
#define DD_STATUS_CURDMACNT(n)	(((n) >> 16) & 0xFFFF)
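
/*
 * Illustrative sketch (not part of the original driver): composing the
 * dd_setup word of a DMA descriptor for a plain (non-ISO, non-ATLE,
 * unchained) transfer from the fields defined above. The real request
 * path also fills in the buffer address, status and next-descriptor fields.
 */
static inline u32 example_dd_setup_word(u32 maxpacket, u32 xfer_bytes)
{
	return DD_SETUP_PACKETLEN(maxpacket) |
	       DD_SETUP_DMALENBYTES(xfer_bytes);
}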
/*
 *
 * Protocol engine bits below
 *
 */

/* Device Interrupt Bit Definitions */
#define FRAME_INT	0x00000001
#define EP_FAST_INT	0x00000002
#define EP_SLOW_INT	0x00000004
#define DEV_STAT_INT	0x00000008
#define CCEMTY_INT	0x00000010
#define CDFULL_INT	0x00000020
#define RxENDPKT_INT	0x00000040
#define TxENDPKT_INT	0x00000080
#define EP_RLZED_INT	0x00000100
#define ERR_INT		0x00000200

/* Rx & Tx Packet Length Definitions */
#define PKT_LNGTH_MASK	0x000003FF
#define PKT_DV		0x00000400
#define PKT_RDY		0x00000800

/* USB Control Definitions */
#define CTRL_RD_EN	0x00000001
#define CTRL_WR_EN	0x00000002

/* Command Codes */
#define CMD_SET_ADDR		0x00D00500
#define CMD_CFG_DEV		0x00D80500
#define CMD_SET_MODE		0x00F30500
#define CMD_RD_FRAME		0x00F50500
#define DAT_RD_FRAME		0x00F50200
#define CMD_RD_TEST		0x00FD0500
#define DAT_RD_TEST		0x00FD0200
#define CMD_SET_DEV_STAT	0x00FE0500
#define CMD_GET_DEV_STAT	0x00FE0500
#define DAT_GET_DEV_STAT	0x00FE0200
#define CMD_GET_ERR_CODE	0x00FF0500
#define DAT_GET_ERR_CODE	0x00FF0200
#define CMD_RD_ERR_STAT		0x00FB0500
#define DAT_RD_ERR_STAT		0x00FB0200
#define DAT_WR_BYTE(x)		(0x00000100 | ((x) << 16))
#define CMD_SEL_EP(x)		(0x00000500 | ((x) << 16))
#define DAT_SEL_EP(x)		(0x00000200 | ((x) << 16))
#define CMD_SEL_EP_CLRI(x)	(0x00400500 | ((x) << 16))
#define DAT_SEL_EP_CLRI(x)	(0x00400200 | ((x) << 16))
#define CMD_SET_EP_STAT(x)	(0x00400500 | ((x) << 16))
#define CMD_CLR_BUF		0x00F20500
#define DAT_CLR_BUF		0x00F20200
#define CMD_VALID_BUF		0x00FA0500
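
/*
 * Illustrative note (not part of the original driver): the command words
 * defined above appear to pack a command code and a phase byte, e.g.
 * CMD_SET_ADDR (0x00D00500) is USBD_CMD_CODE(0xD0) | USBD_CMD_PHASE(0x05),
 * while the matching DAT_* read words reuse the code with phase 0x02 and
 * DAT_WR_BYTE() carries the data byte in the code field with phase 0x01.
 */
static inline u32 example_cmd_word(u8 code, u8 phase)
{
	/* example_cmd_word(0xD0, 0x05) == CMD_SET_ADDR */
	return USBD_CMD_CODE(code) | USBD_CMD_PHASE(phase);
}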
/* Device Address Register Definitions */
#define DEV_ADDR_MASK	0x7F
#define DEV_EN		0x80

/* Device Configure Register Definitions */
#define CONF_DVICE	0x01

/* Device Mode Register Definitions */
#define AP_CLK		0x01
#define INAK_CI		0x02
#define INAK_CO		0x04
#define INAK_II		0x08
#define INAK_IO		0x10
#define INAK_BI		0x20
#define INAK_BO		0x40

/* Device Status Register Definitions */
#define DEV_CON		0x01
#define DEV_CON_CH	0x02
#define DEV_SUS		0x04
#define DEV_SUS_CH	0x08
#define DEV_RST		0x10

/* Error Code Register Definitions */
#define ERR_EC_MASK	0x0F
#define ERR_EA		0x10

/* Error Status Register Definitions */
#define ERR_PID		0x01
#define ERR_UEPKT	0x02
#define ERR_DCRC	0x04
#define ERR_TIMOUT	0x08
#define ERR_EOP		0x10
#define ERR_B_OVRN	0x20
#define ERR_BTSTF	0x40
#define ERR_TGL		0x80

/* Endpoint Select Register Definitions */
#define EP_SEL_F	0x01
#define EP_SEL_ST	0x02
#define EP_SEL_STP	0x04
#define EP_SEL_PO	0x08
#define EP_SEL_EPN	0x10
#define EP_SEL_B_1_FULL	0x20
#define EP_SEL_B_2_FULL	0x40

/* Endpoint Status Register Definitions */
#define EP_STAT_ST	0x01
#define EP_STAT_DA	0x20
#define EP_STAT_RF_MO	0x40
#define EP_STAT_CND_ST	0x80

/* Clear Buffer Register Definitions */
#define CLR_BUF_PO	0x01

/* DMA Interrupt Bit Definitions */
#define EOT_INT		0x01
#define NDD_REQ_INT	0x02
#define SYS_ERR_INT	0x04

#define DRIVER_VERSION	"1.03"
static const char driver_name[] = "lpc32xx_udc";

/*
 *
 * proc interface support
 *
 */
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static char *epnames[] = {"INT", "ISO", "BULK", "CTRL"};
static const char debug_filename[] = "driver/udc";

static void proc_ep_show(struct seq_file *s, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;

	seq_printf(s, "\n");
	seq_printf(s, "%12s, maxpacket %4d %3s",
		   ep->ep.name, ep->ep.maxpacket,
		   ep->is_in ? "in" : "out");
	seq_printf(s, " type %4s", epnames[ep->eptype]);
	seq_printf(s, " ints: %12d", ep->totalints);

	if (list_empty(&ep->queue))
		seq_printf(s, "\t(queue empty)\n");
	else {
		list_for_each_entry(req, &ep->queue, queue) {
			u32 length = req->req.actual;

			seq_printf(s, "\treq %p len %d/%d buf %p\n",
				   &req->req, length,
				   req->req.length, req->req.buf);
		}
	}
}

static int proc_udc_show(struct seq_file *s, void *unused)
{
	struct lpc32xx_udc *udc = s->private;
	struct lpc32xx_ep *ep;
	unsigned long flags;

	seq_printf(s, "%s: version %s\n", driver_name, DRIVER_VERSION);

	spin_lock_irqsave(&udc->lock, flags);

	seq_printf(s, "vbus %s, pullup %s, %s powered%s, gadget %s\n\n",
		   udc->vbus ? "present" : "off",
		   udc->enabled ? (udc->vbus ? "active" : "enabled") :
		   "disabled",
		   udc->selfpowered ? "self" : "VBUS",
		   udc->suspended ? ", suspended" : "",
		   udc->driver ? udc->driver->driver.name : "(none)");

	if (udc->enabled && udc->vbus) {
		proc_ep_show(s, &udc->ep[0]);
		list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list)
			proc_ep_show(s, ep);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static int proc_udc_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_udc_show, PDE_DATA(inode));
}

static const struct file_operations proc_ops = {
	.owner = THIS_MODULE,
	.open = proc_udc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void create_debug_file(struct lpc32xx_udc *udc)
{
	udc->pde = debugfs_create_file(debug_filename, 0, NULL, udc, &proc_ops);
}

static void remove_debug_file(struct lpc32xx_udc *udc)
{
	debugfs_remove(udc->pde);
}

#else
static inline void create_debug_file(struct lpc32xx_udc *udc) {}
static inline void remove_debug_file(struct lpc32xx_udc *udc) {}
#endif
/* Primary initialization sequence for the ISP1301 transceiver */
static void isp1301_udc_configure(struct lpc32xx_udc *udc)
{
	/* LPC32XX only supports DAT_SE0 USB mode */
	/* This sequence is important */

	/* Disable transparent UART mode first */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
		MC1_UART_EN);

	/* Set full speed and SE0 mode */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_MODE_CONTROL_1, (MC1_SPEED_REG | MC1_DAT_SE0));

	/*
	 * The PSW_OE enable bit state is reversed in the ISP1301 User's Guide
	 */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_MODE_CONTROL_2, (MC2_BI_DI | MC2_SPD_SUSP_CTRL));

	/* Drive VBUS_DRV high or low depending on board setup */
	if (udc->board->vbus_drv_pol != 0)
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DRV);
	else
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
			OTG1_VBUS_DRV);

	/* Bi-directional mode with suspend control
	 * Enable both pulldowns for now - the pullup will be enabled when
	 * VBUS is detected */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_OTG_CONTROL_1,
		(0 | OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN));

	/* Discharge VBUS (just in case) */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
	msleep(1);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
		OTG1_VBUS_DISCHRG);

	/* Clear and enable VBUS high edge interrupt */
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_LATCH | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_FALLING, INT_VBUS_VLD);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
	i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
		ISP1301_I2C_INTERRUPT_RISING, INT_VBUS_VLD);

	/* Enable usb_need_clk clock after transceiver is initialized */
	writel((readl(USB_CTRL) | USB_DEV_NEED_CLK_EN), USB_CTRL);

	dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n",
		 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x00));
	dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n",
		 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x02));
	dev_info(udc->dev, "ISP1301 Version ID : 0x%04x\n",
		 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x14));
}
/* Enables or disables the USB device pullup via the ISP1301 transceiver */
static void isp1301_pullup_set(struct lpc32xx_udc *udc)
{
	if (udc->pullup)
		/* Enable pullup for bus signalling */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1, OTG1_DP_PULLUP);
	else
		/* Disable pullup for bus signalling */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
			OTG1_DP_PULLUP);
}
static void pullup_work(struct work_struct *work)
{
	struct lpc32xx_udc *udc =
		container_of(work, struct lpc32xx_udc, pullup_job);

	isp1301_pullup_set(udc);
}

static void isp1301_pullup_enable(struct lpc32xx_udc *udc, int en_pullup,
				  int block)
{
	if (en_pullup == udc->pullup)
		return;

	udc->pullup = en_pullup;
	if (block)
		isp1301_pullup_set(udc);
	else
		/* defer slow i2c pull up setting */
		schedule_work(&udc->pullup_job);
}

#ifdef CONFIG_PM
/* Powers up or down the ISP1301 transceiver */
static void isp1301_set_powerstate(struct lpc32xx_udc *udc, int enable)
{
	if (enable != 0)
		/* Power up ISP1301 - this ISP1301 will automatically wakeup
		   when VBUS is detected */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR,
			MC2_GLOBAL_PWR_DN);
	else
		/* Power down ISP1301 */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
}

static void power_work(struct work_struct *work)
{
	struct lpc32xx_udc *udc =
		container_of(work, struct lpc32xx_udc, power_job);

	isp1301_set_powerstate(udc, udc->poweron);
}
#endif
/*
 *
 * USB protocol engine command/data read/write helper functions
 *
 */

/* Issues a single command to the USB device state machine */
static void udc_protocol_cmd_w(struct lpc32xx_udc *udc, u32 cmd)
{
	u32 pass = 0;
	int to;

	/* EP may lock on CLRI if this read isn't done */
	u32 tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
	(void) tmp;

	while (pass == 0) {
		writel(USBD_CCEMPTY, USBD_DEVINTCLR(udc->udp_baseaddr));

		/* Write command code */
		writel(cmd, USBD_CMDCODE(udc->udp_baseaddr));
		to = 10000;
		while (((readl(USBD_DEVINTST(udc->udp_baseaddr)) &
			 USBD_CCEMPTY) == 0) && (to > 0)) {
			to--;
		}

		if (to > 0)
			pass = 1;

		cpu_relax();
	}
}

/* Issues 2 commands (or command and data) to the USB device state machine */
static inline void udc_protocol_cmd_data_w(struct lpc32xx_udc *udc, u32 cmd,
					   u32 data)
{
	udc_protocol_cmd_w(udc, cmd);
	udc_protocol_cmd_w(udc, data);
}

/* Issues a single command to the USB device state machine and reads
 * response data */
static u32 udc_protocol_cmd_r(struct lpc32xx_udc *udc, u32 cmd)
{
	u32 tmp;
	int to = 1000;

	/* Write a command and read data from the protocol engine */
	writel((USBD_CDFULL | USBD_CCEMPTY),
	       USBD_DEVINTCLR(udc->udp_baseaddr));

	/* Write command code */
	udc_protocol_cmd_w(udc, cmd);

	tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
	while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) & USBD_CDFULL))
	       && (to > 0))
		to--;
	if (!to)
		dev_dbg(udc->dev,
			"Protocol engine didn't receive response (CDFULL)\n");

	return readl(USBD_CMDDATA(udc->udp_baseaddr));
}
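
/*
 * Illustrative sketch (not part of the original driver): a typical
 * command/data read pair. The CMD_* word selects what to read and the
 * matching DAT_* word clocks the response byte out of the protocol
 * engine, shown here for the device status.
 */
static inline u32 example_read_dev_status(struct lpc32xx_udc *udc)
{
	udc_protocol_cmd_w(udc, CMD_GET_DEV_STAT);
	return udc_protocol_cmd_r(udc, DAT_GET_DEV_STAT);
}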
/*
 *
 * USB device interrupt mask support functions
 *
 */

/* Enable one or more USB device interrupts */
static inline void uda_enable_devint(struct lpc32xx_udc *udc, u32 devmask)
{
	udc->enabled_devints |= devmask;
	writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
}

/* Disable one or more USB device interrupts */
static inline void uda_disable_devint(struct lpc32xx_udc *udc, u32 mask)
{
	udc->enabled_devints &= ~mask;
	writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
}

/* Clear one or more USB device interrupts */
static inline void uda_clear_devint(struct lpc32xx_udc *udc, u32 mask)
{
	writel(mask, USBD_DEVINTCLR(udc->udp_baseaddr));
}

/*
 *
 * Endpoint interrupt disable/enable functions
 *
 */

/* Enable one or more USB endpoint interrupts */
static void uda_enable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
{
	udc->enabled_hwepints |= (1 << hwep);
	writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
}

/* Disable one or more USB endpoint interrupts */
static void uda_disable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
{
	udc->enabled_hwepints &= ~(1 << hwep);
	writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
}

/* Clear one or more USB endpoint interrupts */
static inline void uda_clear_hwepint(struct lpc32xx_udc *udc, u32 hwep)
{
	writel((1 << hwep), USBD_EPINTCLR(udc->udp_baseaddr));
}

/* Enable DMA for the HW channel */
static inline void udc_ep_dma_enable(struct lpc32xx_udc *udc, u32 hwep)
{
	writel((1 << hwep), USBD_EPDMAEN(udc->udp_baseaddr));
}

/* Disable DMA for the HW channel */
static inline void udc_ep_dma_disable(struct lpc32xx_udc *udc, u32 hwep)
{
	writel((1 << hwep), USBD_EPDMADIS(udc->udp_baseaddr));
}
/*
 *
 * Endpoint realize/unrealize functions
 *
 */

/* Before an endpoint can be used, it needs to be realized
 * in the USB protocol engine - this realizes the endpoint.
 * The interrupt (FIFO or DMA) is not enabled with this function */
static void udc_realize_hwep(struct lpc32xx_udc *udc, u32 hwep,
			     u32 maxpacket)
{
	int to = 1000;

	writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
	writel(hwep, USBD_EPIND(udc->udp_baseaddr));
	udc->realized_eps |= (1 << hwep);
	writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
	writel(maxpacket, USBD_EPMAXPSIZE(udc->udp_baseaddr));

	/* Wait until endpoint is realized in hardware */
	while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) &
		  USBD_EP_RLZED)) && (to > 0))
		to--;
	if (!to)
		dev_dbg(udc->dev, "EP not correctly realized in hardware\n");

	writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
}

/* Unrealize an EP */
static void udc_unrealize_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc->realized_eps &= ~(1 << hwep);
	writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
}
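
/*
 * Illustrative sketch (not part of the original driver): realizing both EP0
 * hardware endpoints (0 = OUT, 1 = IN) with the same maxpacket size,
 * mirroring what udc_enable() does further below.
 */
static inline void example_realize_ep0(struct lpc32xx_udc *udc, u32 maxpacket)
{
	udc_realize_hwep(udc, 0, maxpacket);
	udc_realize_hwep(udc, 1, maxpacket);
}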
/*
 *
 * Endpoint support functions
 *
 */

/* Select and clear endpoint interrupt */
static u32 udc_selep_clrint(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_w(udc, CMD_SEL_EP_CLRI(hwep));
	return udc_protocol_cmd_r(udc, DAT_SEL_EP_CLRI(hwep));
}

/* Disables the endpoint in the USB protocol engine */
static void udc_disable_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
				DAT_WR_BYTE(EP_STAT_DA));
}

/* Stalls the endpoint - endpoint will return STALL */
static void udc_stall_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
				DAT_WR_BYTE(EP_STAT_ST));
}

/* Clear stall or reset endpoint */
static void udc_clrstall_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
				DAT_WR_BYTE(0));
}

/* Select an endpoint for endpoint status, clear, validate */
static void udc_select_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_protocol_cmd_w(udc, CMD_SEL_EP(hwep));
}

/*
 *
 * Endpoint buffer management functions
 *
 */

/* Clear the current endpoint's buffer */
static void udc_clr_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_select_hwep(udc, hwep);
	udc_protocol_cmd_w(udc, CMD_CLR_BUF);
}

/* Validate the current endpoint's buffer */
static void udc_val_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
{
	udc_select_hwep(udc, hwep);
	udc_protocol_cmd_w(udc, CMD_VALID_BUF);
}

static inline u32 udc_clearep_getsts(struct lpc32xx_udc *udc, u32 hwep)
{
	/* Clear EP interrupt */
	uda_clear_hwepint(udc, hwep);

	return udc_selep_clrint(udc, hwep);
}
/*
 *
 * USB EP DMA support
 *
 */

/* Allocate a DMA Descriptor */
static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc)
{
	dma_addr_t dma;
	struct lpc32xx_usbd_dd_gad *dd;

	dd = (struct lpc32xx_usbd_dd_gad *) dma_pool_alloc(
			udc->dd_cache, (GFP_KERNEL | GFP_DMA), &dma);
	if (dd)
		dd->this_dma = dma;

	return dd;
}

/* Free a DMA Descriptor */
static void udc_dd_free(struct lpc32xx_udc *udc, struct lpc32xx_usbd_dd_gad *dd)
{
	dma_pool_free(udc->dd_cache, dd, dd->this_dma);
}
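
/*
 * Illustrative sketch (not part of the original driver): the alloc/free
 * pairing for a DMA descriptor. The real request path also chains
 * descriptors, installs them in the UDCA and handles ISO endpoints; only
 * the lifecycle and the basic field setup are shown here.
 */
static inline int example_dd_lifecycle(struct lpc32xx_udc *udc,
				       dma_addr_t buf, u32 maxpacket, u32 len)
{
	struct lpc32xx_usbd_dd_gad *dd = udc_dd_alloc(udc);

	if (!dd)
		return -ENOMEM;

	dd->dd_buffer_addr = buf;
	dd->dd_setup = DD_SETUP_PACKETLEN(maxpacket) |
		       DD_SETUP_DMALENBYTES(len);
	dd->dd_status = 0;

	/* ... the descriptor would normally be queued here ... */

	udc_dd_free(udc, dd);
	return 0;
}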
/*
 *
 * USB setup and shutdown functions
 *
 */

/* Enables or disables most of the USB system clocks when low power mode is
 * needed. Clocks are typically started on a connection event, and disabled
 * when a cable is disconnected */
static void udc_clk_set(struct lpc32xx_udc *udc, int enable)
{
	if (enable != 0) {
		if (udc->clocked)
			return;

		udc->clocked = 1;

		/* 48MHz PLL up */
		clk_enable(udc->usb_pll_clk);

		/* Enable the USB device clock */
		writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN,
		       USB_CTRL);

		clk_enable(udc->usb_otg_clk);
	} else {
		if (!udc->clocked)
			return;

		udc->clocked = 0;

		/* Never disable the USB_HCLK during normal operation */

		/* 48MHz PLL down */
		clk_disable(udc->usb_pll_clk);

		/* Disable the USB device clock */
		writel(readl(USB_CTRL) & ~USB_DEV_NEED_CLK_EN,
		       USB_CTRL);

		clk_disable(udc->usb_otg_clk);
	}
}
/* Set/reset USB device address */
static void udc_set_address(struct lpc32xx_udc *udc, u32 addr)
{
	/* Address will be latched at the end of the status phase, or
	   latched immediately if function is called twice */
	udc_protocol_cmd_data_w(udc, CMD_SET_ADDR,
				DAT_WR_BYTE(DEV_EN | addr));
}
/* Set up an IN request for DMA transfer - this consists of determining the
 * list of DMA addresses for the transfer, allocating DMA Descriptors,
 * installing the DD into the UDCA, and then enabling the DMA for that EP */
static int udc_ep_in_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;
	u32 hwep = ep->hwep_num;

	ep->req_pending = 1;

	/* There will always be a request waiting here */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);

	/* Place the DD Descriptor into the UDCA */
	udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;

	/* Enable DMA and interrupt for the HW EP */
	udc_ep_dma_enable(udc, hwep);

	/* Clear ZLP if last packet is not of MAXP size */
	if (req->req.length % ep->ep.maxpacket)
		req->send_zlp = 0;

	return 0;
}

/* Set up an OUT request for DMA transfer - this consists of determining the
 * list of DMA addresses for the transfer, allocating DMA Descriptors,
 * installing the DD into the UDCA, and then enabling the DMA for that EP */
static int udc_ep_out_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;
	u32 hwep = ep->hwep_num;

	ep->req_pending = 1;

	/* There will always be a request waiting here */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);

	/* Place the DD Descriptor into the UDCA */
	udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;

	/* Enable DMA and interrupt for the HW EP */
	udc_ep_dma_enable(udc, hwep);

	return 0;
}
static void udc_disable(struct lpc32xx_udc *udc)
{
	u32 i;

	/* Disable device */
	udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
	udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(0));

	/* Disable all device interrupts (including EP0) */
	uda_disable_devint(udc, 0x3FF);

	/* Disable and reset all endpoint interrupts */
	for (i = 0; i < 32; i++) {
		uda_disable_hwepint(udc, i);
		uda_clear_hwepint(udc, i);
		udc_disable_hwep(udc, i);
		udc_unrealize_hwep(udc, i);
		udc->udca_v_base[i] = 0;

		/* Disable and clear all interrupts and DMA */
		udc_ep_dma_disable(udc, i);
		writel((1 << i), USBD_EOTINTCLR(udc->udp_baseaddr));
		writel((1 << i), USBD_NDDRTINTCLR(udc->udp_baseaddr));
		writel((1 << i), USBD_SYSERRTINTCLR(udc->udp_baseaddr));
		writel((1 << i), USBD_DMARCLR(udc->udp_baseaddr));
	}

	/* Disable DMA interrupts */
	writel(0, USBD_DMAINTEN(udc->udp_baseaddr));

	writel(0, USBD_UDCAH(udc->udp_baseaddr));
}

static void udc_enable(struct lpc32xx_udc *udc)
{
	u32 i;
	struct lpc32xx_ep *ep = &udc->ep[0];

	/* Start with known state */
	udc_disable(udc);

	/* Enable device */
	udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(DEV_CON));

	/* EP interrupts on high priority, FRAME interrupt on low priority */
	writel(USBD_EP_FAST, USBD_DEVINTPRI(udc->udp_baseaddr));
	writel(0xFFFF, USBD_EPINTPRI(udc->udp_baseaddr));

	/* Clear any pending device interrupts */
	writel(0x3FF, USBD_DEVINTCLR(udc->udp_baseaddr));

	/* Setup UDCA - not yet used (DMA) */
	writel(udc->udca_p_base, USBD_UDCAH(udc->udp_baseaddr));

	/* Only enable EP0 in and out for now, EP0 only works in FIFO mode */
	for (i = 0; i <= 1; i++) {
		udc_realize_hwep(udc, i, ep->ep.maxpacket);
		uda_enable_hwepint(udc, i);
		udc_select_hwep(udc, i);
		udc_clrstall_hwep(udc, i);
		udc_clr_buffer_hwep(udc, i);
	}

	/* Device interrupt setup */
	uda_clear_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
			       USBD_EP_FAST));
	uda_enable_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
				USBD_EP_FAST));

	/* Set device address to 0 - called twice to force a latch in the USB
	   engine without the need of a setup packet status closure */
	udc_set_address(udc, 0);
	udc_set_address(udc, 0);

	/* Enable master DMA interrupts */
	writel((USBD_SYS_ERR_INT | USBD_EOT_INT),
	       USBD_DMAINTEN(udc->udp_baseaddr));

	udc->dev_status = 0;
}
/*
 *
 * USB device board specific events handled via callbacks
 *
 */

/* Connection change event - notify board function of change */
static void uda_power_event(struct lpc32xx_udc *udc, u32 conn)
{
	/* Just notify of a connection change event (optional) */
	if (udc->board->conn_chgb != NULL)
		udc->board->conn_chgb(conn);
}

/* Suspend/resume event - notify board function of change */
static void uda_resm_susp_event(struct lpc32xx_udc *udc, u32 conn)
{
	/* Just notify of a Suspend/resume change event (optional) */
	if (udc->board->susp_chgb != NULL)
		udc->board->susp_chgb(conn);

	if (conn)
		udc->suspended = 0;
	else
		udc->suspended = 1;
}

/* Remote wakeup enable/disable - notify board function of change */
static void uda_remwkp_cgh(struct lpc32xx_udc *udc)
{
	if (udc->board->rmwk_chgb != NULL)
		udc->board->rmwk_chgb(udc->dev_status &
				      (1 << USB_DEVICE_REMOTE_WAKEUP));
}
/* Reads data from FIFO, adjusts for alignment and data size */
static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
{
	int n, i, bl;
	u16 *p16;
	u32 *p32, tmp, cbytes;

	/* Use optimal data transfer method based on source address and size */
	switch (((u32) data) & 0x3) {
	case 0: /* 32-bit aligned */
		p32 = (u32 *) data;
		cbytes = (bytes & ~0x3);

		/* Copy 32-bit aligned data first */
		for (n = 0; n < cbytes; n += 4)
			*p32++ = readl(USBD_RXDATA(udc->udp_baseaddr));

		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			for (n = 0; n < bl; n++)
				data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
		}
		break;

	case 1: /* 8-bit aligned */
	case 3:
		/* Each byte has to be handled independently */
		for (n = 0; n < bytes; n += 4) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));

			bl = bytes - n;
			if (bl > 4)
				bl = 4;

			for (i = 0; i < bl; i++)
				data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
		}
		break;

	case 2: /* 16-bit aligned */
		p16 = (u16 *) data;
		cbytes = (bytes & ~0x3);

		/* Copy 32-bit sized objects first with 16-bit alignment */
		for (n = 0; n < cbytes; n += 4) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			*p16++ = (u16)(tmp & 0xFFFF);
			*p16++ = (u16)((tmp >> 16) & 0xFFFF);
		}

		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			for (n = 0; n < bl; n++)
				data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
		}
		break;
	}
}
/* Read data from the FIFO for an endpoint. This function is for endpoints
 * (such as EP0) that don't use DMA. This function should only be called if a
 * packet is known to be ready to read for the endpoint. Note that the
 * endpoint must be selected in the protocol engine prior to this call. */
static u32 udc_read_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
			 u32 bytes)
{
	u32 tmpv;
	int to = 1000;
	u32 tmp, hwrep = ((hwep & 0x1E) << 1) | CTRL_RD_EN;

	/* Setup read of endpoint */
	writel(hwrep, USBD_CTRL(udc->udp_baseaddr));

	/* Wait until packet is ready */
	while ((((tmpv = readl(USBD_RXPLEN(udc->udp_baseaddr))) &
		 PKT_RDY) == 0) && (to > 0))
		to--;
	if (!to)
		dev_dbg(udc->dev, "No packet ready on FIFO EP read\n");

	/* Mask out count */
	tmp = tmpv & PKT_LNGTH_MASK;
	if (bytes < tmp)
		tmp = bytes;

	if ((tmp > 0) && (data != NULL))
		udc_pop_fifo(udc, (u8 *) data, tmp);

	writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));

	/* Clear the buffer */
	udc_clr_buffer_hwep(udc, hwep);

	return tmp;
}
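
/*
 * Illustrative sketch (not part of the original driver): draining a SETUP
 * packet from the EP0 OUT FIFO. The endpoint is selected first, then the
 * 8-byte setup packet is read into the caller's buffer (assumed to hold
 * at least two 32-bit words).
 */
static inline u32 example_read_setup_packet(struct lpc32xx_udc *udc, u32 *buf)
{
	udc_select_hwep(udc, EP_OUT);
	return udc_read_hwep(udc, EP_OUT, buf, 8);
}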
  1101. /* Stuffs data into the FIFO, adjusts for alignment and data size */
  1102. static void udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
  1103. {
  1104. int n, i, bl;
  1105. u16 *p16;
  1106. u32 *p32, tmp, cbytes;
  1107. /* Use optimal data transfer method based on source address and size */
  1108. switch (((u32) data) & 0x3) {
  1109. case 0: /* 32-bit aligned */
  1110. p32 = (u32 *) data;
  1111. cbytes = (bytes & ~0x3);
  1112. /* Copy 32-bit aligned data first */
  1113. for (n = 0; n < cbytes; n += 4)
  1114. writel(*p32++, USBD_TXDATA(udc->udp_baseaddr));
  1115. /* Handle any remaining bytes */
  1116. bl = bytes - cbytes;
  1117. if (bl) {
  1118. tmp = 0;
  1119. for (n = 0; n < bl; n++)
  1120. tmp |= data[cbytes + n] << (n * 8);
  1121. writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
  1122. }
  1123. break;
  1124. case 1: /* 8-bit aligned */
  1125. case 3:
  1126. /* Each byte has to be handled independently */
  1127. for (n = 0; n < bytes; n += 4) {
  1128. bl = bytes - n;
  1129. if (bl > 4)
  1130. bl = 4;
  1131. tmp = 0;
  1132. for (i = 0; i < bl; i++)
  1133. tmp |= data[n + i] << (i * 8);
  1134. writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
  1135. }
  1136. break;
  1137. case 2: /* 16-bit aligned */
  1138. p16 = (u16 *) data;
  1139. cbytes = (bytes & ~0x3);
1140. /* Copy 32-bit sized objects first with 16-bit alignment */
  1141. for (n = 0; n < cbytes; n += 4) {
  1142. tmp = *p16++ & 0xFFFF;
  1143. tmp |= (*p16++ & 0xFFFF) << 16;
  1144. writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
  1145. }
  1146. /* Handle any remaining bytes */
  1147. bl = bytes - cbytes;
  1148. if (bl) {
  1149. tmp = 0;
  1150. for (n = 0; n < bl; n++)
  1151. tmp |= data[cbytes + n] << (n * 8);
  1152. writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
  1153. }
  1154. break;
  1155. }
  1156. }
  1157. /* Write data to the FIFO for an endpoint. This function is for endpoints (such
  1158. * as EP0) that don't use DMA. Note that the endpoint must be selected in the
  1159. * protocol engine prior to this call. */
  1160. static void udc_write_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
  1161. u32 bytes)
  1162. {
  1163. u32 hwwep = ((hwep & 0x1E) << 1) | CTRL_WR_EN;
  1164. if ((bytes > 0) && (data == NULL))
  1165. return;
  1166. /* Setup write of endpoint */
  1167. writel(hwwep, USBD_CTRL(udc->udp_baseaddr));
  1168. writel(bytes, USBD_TXPLEN(udc->udp_baseaddr));
  1169. /* Need at least 1 byte to trigger TX */
  1170. if (bytes == 0)
  1171. writel(0, USBD_TXDATA(udc->udp_baseaddr));
  1172. else
  1173. udc_stuff_fifo(udc, (u8 *) data, bytes);
  1174. writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));
  1175. udc_val_buffer_hwep(udc, hwep);
  1176. }
  1177. /* USB device reset - resets USB to a default state with just EP0
  1178. enabled */
  1179. static void uda_usb_reset(struct lpc32xx_udc *udc)
  1180. {
  1181. u32 i = 0;
  1182. /* Re-init device controller and EP0 */
  1183. udc_enable(udc);
  1184. udc->gadget.speed = USB_SPEED_FULL;
  1185. for (i = 1; i < NUM_ENDPOINTS; i++) {
  1186. struct lpc32xx_ep *ep = &udc->ep[i];
  1187. ep->req_pending = 0;
  1188. }
  1189. }
  1190. /* Send a ZLP on EP0 */
  1191. static void udc_ep0_send_zlp(struct lpc32xx_udc *udc)
  1192. {
  1193. udc_write_hwep(udc, EP_IN, NULL, 0);
  1194. }
  1195. /* Get current frame number */
  1196. static u16 udc_get_current_frame(struct lpc32xx_udc *udc)
  1197. {
  1198. u16 flo, fhi;
  1199. udc_protocol_cmd_w(udc, CMD_RD_FRAME);
  1200. flo = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
  1201. fhi = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
  1202. return (fhi << 8) | flo;
  1203. }
  1204. /* Set the device as configured - enables all endpoints */
  1205. static inline void udc_set_device_configured(struct lpc32xx_udc *udc)
  1206. {
  1207. udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(CONF_DVICE));
  1208. }
  1209. /* Set the device as unconfigured - disables all endpoints */
  1210. static inline void udc_set_device_unconfigured(struct lpc32xx_udc *udc)
  1211. {
  1212. udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
  1213. }
  1214. /* reinit == restore initial software state */
  1215. static void udc_reinit(struct lpc32xx_udc *udc)
  1216. {
  1217. u32 i;
  1218. INIT_LIST_HEAD(&udc->gadget.ep_list);
  1219. INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);
  1220. for (i = 0; i < NUM_ENDPOINTS; i++) {
  1221. struct lpc32xx_ep *ep = &udc->ep[i];
  1222. if (i != 0)
  1223. list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
  1224. usb_ep_set_maxpacket_limit(&ep->ep, ep->maxpacket);
  1225. INIT_LIST_HEAD(&ep->queue);
  1226. ep->req_pending = 0;
  1227. }
  1228. udc->ep0state = WAIT_FOR_SETUP;
  1229. }
  1230. /* Must be called with lock */
  1231. static void done(struct lpc32xx_ep *ep, struct lpc32xx_request *req, int status)
  1232. {
  1233. struct lpc32xx_udc *udc = ep->udc;
  1234. list_del_init(&req->queue);
  1235. if (req->req.status == -EINPROGRESS)
  1236. req->req.status = status;
  1237. else
  1238. status = req->req.status;
  1239. if (ep->lep) {
  1240. usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in);
  1241. /* Free DDs */
  1242. udc_dd_free(udc, req->dd_desc_ptr);
  1243. }
  1244. if (status && status != -ESHUTDOWN)
  1245. ep_dbg(ep, "%s done %p, status %d\n", ep->ep.name, req, status);
  1246. ep->req_pending = 0;
  1247. spin_unlock(&udc->lock);
  1248. usb_gadget_giveback_request(&ep->ep, &req->req);
  1249. spin_lock(&udc->lock);
  1250. }
  1251. /* Must be called with lock */
  1252. static void nuke(struct lpc32xx_ep *ep, int status)
  1253. {
  1254. struct lpc32xx_request *req;
  1255. while (!list_empty(&ep->queue)) {
  1256. req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
  1257. done(ep, req, status);
  1258. }
  1259. if (status == -ESHUTDOWN) {
  1260. uda_disable_hwepint(ep->udc, ep->hwep_num);
  1261. udc_disable_hwep(ep->udc, ep->hwep_num);
  1262. }
  1263. }
  1264. /* IN endpoint 0 transfer */
  1265. static int udc_ep0_in_req(struct lpc32xx_udc *udc)
  1266. {
  1267. struct lpc32xx_request *req;
  1268. struct lpc32xx_ep *ep0 = &udc->ep[0];
  1269. u32 tsend, ts = 0;
  1270. if (list_empty(&ep0->queue))
  1271. /* Nothing to send */
  1272. return 0;
  1273. else
  1274. req = list_entry(ep0->queue.next, struct lpc32xx_request,
  1275. queue);
  1276. tsend = ts = req->req.length - req->req.actual;
  1277. if (ts == 0) {
  1278. /* Send a ZLP */
  1279. udc_ep0_send_zlp(udc);
  1280. done(ep0, req, 0);
  1281. return 1;
  1282. } else if (ts > ep0->ep.maxpacket)
  1283. ts = ep0->ep.maxpacket; /* Just send what we can */
  1284. /* Write data to the EP0 FIFO and start transfer */
  1285. udc_write_hwep(udc, EP_IN, (req->req.buf + req->req.actual), ts);
  1286. /* Increment data pointer */
  1287. req->req.actual += ts;
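/* A full-length packet means the host still expects more data (or a
 * terminating short/zero-length packet), so stay in the DATA_IN state */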
  1288. if (tsend >= ep0->ep.maxpacket)
  1289. return 0; /* Stay in data transfer state */
  1290. /* Transfer request is complete */
  1291. udc->ep0state = WAIT_FOR_SETUP;
  1292. done(ep0, req, 0);
  1293. return 1;
  1294. }
  1295. /* OUT endpoint 0 transfer */
  1296. static int udc_ep0_out_req(struct lpc32xx_udc *udc)
  1297. {
  1298. struct lpc32xx_request *req;
  1299. struct lpc32xx_ep *ep0 = &udc->ep[0];
  1300. u32 tr, bufferspace;
  1301. if (list_empty(&ep0->queue))
  1302. return 0;
  1303. else
  1304. req = list_entry(ep0->queue.next, struct lpc32xx_request,
  1305. queue);
  1306. if (req) {
  1307. if (req->req.length == 0) {
  1308. /* Just dequeue request */
  1309. done(ep0, req, 0);
  1310. udc->ep0state = WAIT_FOR_SETUP;
  1311. return 1;
  1312. }
  1313. /* Get data from FIFO */
  1314. bufferspace = req->req.length - req->req.actual;
  1315. if (bufferspace > ep0->ep.maxpacket)
  1316. bufferspace = ep0->ep.maxpacket;
  1317. /* Copy data to buffer */
  1318. prefetchw(req->req.buf + req->req.actual);
  1319. tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual,
  1320. bufferspace);
  1321. req->req.actual += bufferspace;
  1322. if (tr < ep0->ep.maxpacket) {
  1323. /* This is the last packet */
  1324. done(ep0, req, 0);
  1325. udc->ep0state = WAIT_FOR_SETUP;
  1326. return 1;
  1327. }
  1328. }
  1329. return 0;
  1330. }
  1331. /* Must be called with lock */
  1332. static void stop_activity(struct lpc32xx_udc *udc)
  1333. {
  1334. struct usb_gadget_driver *driver = udc->driver;
  1335. int i;
  1336. if (udc->gadget.speed == USB_SPEED_UNKNOWN)
  1337. driver = NULL;
  1338. udc->gadget.speed = USB_SPEED_UNKNOWN;
  1339. udc->suspended = 0;
  1340. for (i = 0; i < NUM_ENDPOINTS; i++) {
  1341. struct lpc32xx_ep *ep = &udc->ep[i];
  1342. nuke(ep, -ESHUTDOWN);
  1343. }
  1344. if (driver) {
  1345. spin_unlock(&udc->lock);
  1346. driver->disconnect(&udc->gadget);
  1347. spin_lock(&udc->lock);
  1348. }
  1349. isp1301_pullup_enable(udc, 0, 0);
  1350. udc_disable(udc);
  1351. udc_reinit(udc);
  1352. }
  1353. /*
  1354. * Activate or kill host pullup
  1355. * Can be called with or without lock
  1356. */
  1357. static void pullup(struct lpc32xx_udc *udc, int is_on)
  1358. {
  1359. if (!udc->clocked)
  1360. return;
  1361. if (!udc->enabled || !udc->vbus)
  1362. is_on = 0;
  1363. if (is_on != udc->pullup)
  1364. isp1301_pullup_enable(udc, is_on, 0);
  1365. }
  1366. /* Must be called without lock */
  1367. static int lpc32xx_ep_disable(struct usb_ep *_ep)
  1368. {
  1369. struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
  1370. struct lpc32xx_udc *udc = ep->udc;
  1371. unsigned long flags;
  1372. if ((ep->hwep_num_base == 0) || (ep->hwep_num == 0))
  1373. return -EINVAL;
  1374. spin_lock_irqsave(&udc->lock, flags);
  1375. nuke(ep, -ESHUTDOWN);
  1376. /* Clear all DMA statuses for this EP */
  1377. udc_ep_dma_disable(udc, ep->hwep_num);
  1378. writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
  1379. writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
  1380. writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
  1381. writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));
  1382. /* Remove the DD pointer in the UDCA */
  1383. udc->udca_v_base[ep->hwep_num] = 0;
  1384. /* Disable and reset endpoint and interrupt */
  1385. uda_clear_hwepint(udc, ep->hwep_num);
  1386. udc_unrealize_hwep(udc, ep->hwep_num);
  1387. ep->hwep_num = 0;
  1388. spin_unlock_irqrestore(&udc->lock, flags);
  1389. atomic_dec(&udc->enabled_ep_cnt);
  1390. wake_up(&udc->ep_disable_wait_queue);
  1391. return 0;
  1392. }
  1393. /* Must be called without lock */
  1394. static int lpc32xx_ep_enable(struct usb_ep *_ep,
  1395. const struct usb_endpoint_descriptor *desc)
  1396. {
  1397. struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
  1398. struct lpc32xx_udc *udc = ep->udc;
  1399. u16 maxpacket;
  1400. u32 tmp;
  1401. unsigned long flags;
  1402. /* Verify EP data */
  1403. if ((!_ep) || (!ep) || (!desc) ||
  1404. (desc->bDescriptorType != USB_DT_ENDPOINT)) {
  1405. dev_dbg(udc->dev, "bad ep or descriptor\n");
  1406. return -EINVAL;
  1407. }
  1408. maxpacket = usb_endpoint_maxp(desc);
  1409. if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) {
  1410. dev_dbg(udc->dev, "bad ep descriptor's packet size\n");
  1411. return -EINVAL;
  1412. }
  1413. /* Don't touch EP0 */
  1414. if (ep->hwep_num_base == 0) {
  1415. dev_dbg(udc->dev, "Can't re-enable EP0!!!\n");
  1416. return -EINVAL;
  1417. }
  1418. /* Is driver ready? */
  1419. if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
  1420. dev_dbg(udc->dev, "bogus device state\n");
  1421. return -ESHUTDOWN;
  1422. }
  1423. tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
  1424. switch (tmp) {
  1425. case USB_ENDPOINT_XFER_CONTROL:
  1426. return -EINVAL;
  1427. case USB_ENDPOINT_XFER_INT:
  1428. if (maxpacket > ep->maxpacket) {
  1429. dev_dbg(udc->dev,
  1430. "Bad INT endpoint maxpacket %d\n", maxpacket);
  1431. return -EINVAL;
  1432. }
  1433. break;
  1434. case USB_ENDPOINT_XFER_BULK:
  1435. switch (maxpacket) {
  1436. case 8:
  1437. case 16:
  1438. case 32:
  1439. case 64:
  1440. break;
  1441. default:
  1442. dev_dbg(udc->dev,
  1443. "Bad BULK endpoint maxpacket %d\n", maxpacket);
  1444. return -EINVAL;
  1445. }
  1446. break;
  1447. case USB_ENDPOINT_XFER_ISOC:
  1448. break;
  1449. }
  1450. spin_lock_irqsave(&udc->lock, flags);
  1451. /* Initialize endpoint to match the selected descriptor */
  1452. ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
  1453. ep->ep.maxpacket = maxpacket;
  1454. /* Map hardware endpoint from base and direction */
  1455. if (ep->is_in)
  1456. /* IN endpoints are offset 1 from the OUT endpoint */
  1457. ep->hwep_num = ep->hwep_num_base + EP_IN;
  1458. else
  1459. ep->hwep_num = ep->hwep_num_base;
  1460. ep_dbg(ep, "EP enabled: %s, HW:%d, MP:%d IN:%d\n", ep->ep.name,
  1461. ep->hwep_num, maxpacket, (ep->is_in == 1));
  1462. /* Realize the endpoint, interrupt is enabled later when
  1463. * buffers are queued, IN EPs will NAK until buffers are ready */
  1464. udc_realize_hwep(udc, ep->hwep_num, ep->ep.maxpacket);
  1465. udc_clr_buffer_hwep(udc, ep->hwep_num);
  1466. uda_disable_hwepint(udc, ep->hwep_num);
  1467. udc_clrstall_hwep(udc, ep->hwep_num);
  1468. /* Clear all DMA statuses for this EP */
  1469. udc_ep_dma_disable(udc, ep->hwep_num);
  1470. writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
  1471. writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
  1472. writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
  1473. writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));
  1474. spin_unlock_irqrestore(&udc->lock, flags);
  1475. atomic_inc(&udc->enabled_ep_cnt);
  1476. return 0;
  1477. }
  1478. /*
  1479. * Allocate a USB request list
  1480. * Can be called with or without lock
  1481. */
  1482. static struct usb_request *lpc32xx_ep_alloc_request(struct usb_ep *_ep,
  1483. gfp_t gfp_flags)
  1484. {
  1485. struct lpc32xx_request *req;
  1486. req = kzalloc(sizeof(struct lpc32xx_request), gfp_flags);
  1487. if (!req)
  1488. return NULL;
  1489. INIT_LIST_HEAD(&req->queue);
  1490. return &req->req;
  1491. }
  1492. /*
  1493. * De-allocate a USB request list
  1494. * Can be called with or without lock
  1495. */
  1496. static void lpc32xx_ep_free_request(struct usb_ep *_ep,
  1497. struct usb_request *_req)
  1498. {
  1499. struct lpc32xx_request *req;
  1500. req = container_of(_req, struct lpc32xx_request, req);
  1501. BUG_ON(!list_empty(&req->queue));
  1502. kfree(req);
  1503. }
  1504. /* Must be called without lock */
  1505. static int lpc32xx_ep_queue(struct usb_ep *_ep,
  1506. struct usb_request *_req, gfp_t gfp_flags)
  1507. {
  1508. struct lpc32xx_request *req;
  1509. struct lpc32xx_ep *ep;
  1510. struct lpc32xx_udc *udc;
  1511. unsigned long flags;
  1512. int status = 0;
  1513. req = container_of(_req, struct lpc32xx_request, req);
  1514. ep = container_of(_ep, struct lpc32xx_ep, ep);
  1515. if (!_req || !_req->complete || !_req->buf ||
  1516. !list_empty(&req->queue))
  1517. return -EINVAL;
  1518. udc = ep->udc;
  1519. if (!_ep) {
  1520. dev_dbg(udc->dev, "invalid ep\n");
  1521. return -EINVAL;
  1522. }
  1523. if ((!udc) || (!udc->driver) ||
  1524. (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
  1525. dev_dbg(udc->dev, "invalid device\n");
  1526. return -EINVAL;
  1527. }
  1528. if (ep->lep) {
  1529. struct lpc32xx_usbd_dd_gad *dd;
  1530. status = usb_gadget_map_request(&udc->gadget, _req, ep->is_in);
  1531. if (status)
  1532. return status;
  1533. /* For the request, build a list of DDs */
  1534. dd = udc_dd_alloc(udc);
  1535. if (!dd) {
  1536. /* Error allocating DD */
  1537. return -ENOMEM;
  1538. }
  1539. req->dd_desc_ptr = dd;
  1540. /* Setup the DMA descriptor */
  1541. dd->dd_next_phy = dd->dd_next_v = 0;
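/* Only a single DD is used per request, so the next-descriptor links
 * stay zero and the controller stops once this descriptor retires */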
  1542. dd->dd_buffer_addr = req->req.dma;
  1543. dd->dd_status = 0;
  1544. /* Special handling for ISO EPs */
  1545. if (ep->eptype == EP_ISO_TYPE) {
  1546. dd->dd_setup = DD_SETUP_ISO_EP |
  1547. DD_SETUP_PACKETLEN(0) |
  1548. DD_SETUP_DMALENBYTES(1);
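/* The per-packet size/status memory for ISO transfers is pointed back
 * into this DD (24 bytes past its own DMA address) instead of using a
 * separate allocation */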
  1549. dd->dd_iso_ps_mem_addr = dd->this_dma + 24;
  1550. if (ep->is_in)
  1551. dd->iso_status[0] = req->req.length;
  1552. else
  1553. dd->iso_status[0] = 0;
  1554. } else
  1555. dd->dd_setup = DD_SETUP_PACKETLEN(ep->ep.maxpacket) |
  1556. DD_SETUP_DMALENBYTES(req->req.length);
  1557. }
  1558. ep_dbg(ep, "%s queue req %p len %d buf %p (in=%d) z=%d\n", _ep->name,
  1559. _req, _req->length, _req->buf, ep->is_in, _req->zero);
  1560. spin_lock_irqsave(&udc->lock, flags);
  1561. _req->status = -EINPROGRESS;
  1562. _req->actual = 0;
  1563. req->send_zlp = _req->zero;
  1564. /* Kickstart empty queues */
  1565. if (list_empty(&ep->queue)) {
  1566. list_add_tail(&req->queue, &ep->queue);
  1567. if (ep->hwep_num_base == 0) {
  1568. /* Handle expected data direction */
  1569. if (ep->is_in) {
  1570. /* IN packet to host */
  1571. udc->ep0state = DATA_IN;
  1572. status = udc_ep0_in_req(udc);
  1573. } else {
  1574. /* OUT packet from host */
  1575. udc->ep0state = DATA_OUT;
  1576. status = udc_ep0_out_req(udc);
  1577. }
  1578. } else if (ep->is_in) {
  1579. /* IN packet to host and kick off transfer */
  1580. if (!ep->req_pending)
  1581. udc_ep_in_req_dma(udc, ep);
  1582. } else
  1583. /* OUT packet from host and kick off list */
  1584. if (!ep->req_pending)
  1585. udc_ep_out_req_dma(udc, ep);
  1586. } else
  1587. list_add_tail(&req->queue, &ep->queue);
  1588. spin_unlock_irqrestore(&udc->lock, flags);
  1589. return (status < 0) ? status : 0;
  1590. }
  1591. /* Must be called without lock */
  1592. static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
  1593. {
  1594. struct lpc32xx_ep *ep;
  1595. struct lpc32xx_request *req;
  1596. unsigned long flags;
  1597. ep = container_of(_ep, struct lpc32xx_ep, ep);
  1598. if (!_ep || ep->hwep_num_base == 0)
  1599. return -EINVAL;
  1600. spin_lock_irqsave(&ep->udc->lock, flags);
  1601. /* make sure it's actually queued on this endpoint */
  1602. list_for_each_entry(req, &ep->queue, queue) {
  1603. if (&req->req == _req)
  1604. break;
  1605. }
  1606. if (&req->req != _req) {
  1607. spin_unlock_irqrestore(&ep->udc->lock, flags);
  1608. return -EINVAL;
  1609. }
  1610. done(ep, req, -ECONNRESET);
  1611. spin_unlock_irqrestore(&ep->udc->lock, flags);
  1612. return 0;
  1613. }
  1614. /* Must be called without lock */
  1615. static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value)
  1616. {
  1617. struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
  1618. struct lpc32xx_udc *udc = ep->udc;
  1619. unsigned long flags;
  1620. if ((!ep) || (ep->hwep_num <= 1))
  1621. return -EINVAL;
  1622. /* Don't halt an IN EP */
  1623. if (ep->is_in)
  1624. return -EAGAIN;
  1625. spin_lock_irqsave(&udc->lock, flags);
  1626. if (value == 1) {
  1627. /* stall */
  1628. udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
  1629. DAT_WR_BYTE(EP_STAT_ST));
  1630. } else {
  1631. /* End stall */
  1632. ep->wedge = 0;
  1633. udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
  1634. DAT_WR_BYTE(0));
  1635. }
  1636. spin_unlock_irqrestore(&udc->lock, flags);
  1637. return 0;
  1638. }
1639. /* Set the halt feature; clear (un-halt) requests are ignored */
  1640. static int lpc32xx_ep_set_wedge(struct usb_ep *_ep)
  1641. {
  1642. struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
  1643. if (!_ep || !ep->udc)
  1644. return -EINVAL;
  1645. ep->wedge = 1;
  1646. return usb_ep_set_halt(_ep);
  1647. }
  1648. static const struct usb_ep_ops lpc32xx_ep_ops = {
  1649. .enable = lpc32xx_ep_enable,
  1650. .disable = lpc32xx_ep_disable,
  1651. .alloc_request = lpc32xx_ep_alloc_request,
  1652. .free_request = lpc32xx_ep_free_request,
  1653. .queue = lpc32xx_ep_queue,
  1654. .dequeue = lpc32xx_ep_dequeue,
  1655. .set_halt = lpc32xx_ep_set_halt,
  1656. .set_wedge = lpc32xx_ep_set_wedge,
  1657. };
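/*
 * These ops are not called directly by gadget drivers; the gadget core
 * dispatches to them through the usb_ep_*() wrappers (usb_ep_enable(),
 * usb_ep_queue(), usb_ep_dequeue(), usb_ep_set_halt(), ...), which is
 * why each of them must cope with being entered without the UDC lock held.
 */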
  1658. /* Send a ZLP on a non-0 IN EP */
  1659. void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
  1660. {
  1661. /* Clear EP status */
  1662. udc_clearep_getsts(udc, ep->hwep_num);
  1663. /* Send ZLP via FIFO mechanism */
  1664. udc_write_hwep(udc, ep->hwep_num, NULL, 0);
  1665. }
  1666. /*
  1667. * Handle EP completion for ZLP
  1668. * This function will only be called when a delayed ZLP needs to be sent out
  1669. * after a DMA transfer has filled both buffers.
  1670. */
  1671. void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
  1672. {
  1673. u32 epstatus;
  1674. struct lpc32xx_request *req;
  1675. if (ep->hwep_num <= 0)
  1676. return;
  1677. uda_clear_hwepint(udc, ep->hwep_num);
  1678. /* If this interrupt isn't enabled, return now */
  1679. if (!(udc->enabled_hwepints & (1 << ep->hwep_num)))
  1680. return;
  1681. /* Get endpoint status */
  1682. epstatus = udc_clearep_getsts(udc, ep->hwep_num);
  1683. /*
  1684. * This should never happen, but protect against writing to the
  1685. * buffer when full.
  1686. */
  1687. if (epstatus & EP_SEL_F)
  1688. return;
  1689. if (ep->is_in) {
  1690. udc_send_in_zlp(udc, ep);
  1691. uda_disable_hwepint(udc, ep->hwep_num);
  1692. } else
  1693. return;
  1694. /* If there isn't a request waiting, something went wrong */
  1695. req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
  1696. if (req) {
  1697. done(ep, req, 0);
  1698. /* Start another request if ready */
  1699. if (!list_empty(&ep->queue)) {
  1700. if (ep->is_in)
  1701. udc_ep_in_req_dma(udc, ep);
  1702. else
  1703. udc_ep_out_req_dma(udc, ep);
  1704. } else
  1705. ep->req_pending = 0;
  1706. }
  1707. }
  1708. /* DMA end of transfer completion */
  1709. static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
  1710. {
  1711. u32 status, epstatus;
  1712. struct lpc32xx_request *req;
  1713. struct lpc32xx_usbd_dd_gad *dd;
  1714. #ifdef CONFIG_USB_GADGET_DEBUG_FILES
  1715. ep->totalints++;
  1716. #endif
  1717. req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
  1718. if (!req) {
  1719. ep_err(ep, "DMA interrupt on no req!\n");
  1720. return;
  1721. }
  1722. dd = req->dd_desc_ptr;
  1723. /* DMA descriptor should always be retired for this call */
  1724. if (!(dd->dd_status & DD_STATUS_DD_RETIRED))
  1725. ep_warn(ep, "DMA descriptor did not retire\n");
  1726. /* Disable DMA */
  1727. udc_ep_dma_disable(udc, ep->hwep_num);
  1728. writel((1 << ep->hwep_num), USBD_EOTINTCLR(udc->udp_baseaddr));
  1729. writel((1 << ep->hwep_num), USBD_NDDRTINTCLR(udc->udp_baseaddr));
  1730. /* System error? */
  1731. if (readl(USBD_SYSERRTINTST(udc->udp_baseaddr)) &
  1732. (1 << ep->hwep_num)) {
  1733. writel((1 << ep->hwep_num),
  1734. USBD_SYSERRTINTCLR(udc->udp_baseaddr));
  1735. ep_err(ep, "AHB critical error!\n");
  1736. ep->req_pending = 0;
  1737. /* The error could have occurred on a packet of a multipacket
  1738. * transfer, so recovering the transfer is not possible. Close
  1739. * the request with an error */
  1740. done(ep, req, -ECONNABORTED);
  1741. return;
  1742. }
  1743. /* Handle the current DD's status */
  1744. status = dd->dd_status;
  1745. switch (status & DD_STATUS_STS_MASK) {
  1746. case DD_STATUS_STS_NS:
  1747. /* DD not serviced? This shouldn't happen! */
  1748. ep->req_pending = 0;
  1749. ep_err(ep, "DMA critical EP error: DD not serviced (0x%x)!\n",
  1750. status);
  1751. done(ep, req, -ECONNABORTED);
  1752. return;
  1753. case DD_STATUS_STS_BS:
  1754. /* Interrupt only fires on EOT - This shouldn't happen! */
  1755. ep->req_pending = 0;
  1756. ep_err(ep, "DMA critical EP error: EOT prior to service completion (0x%x)!\n",
  1757. status);
  1758. done(ep, req, -ECONNABORTED);
  1759. return;
  1760. case DD_STATUS_STS_NC:
  1761. case DD_STATUS_STS_DUR:
  1762. /* Really just a short packet, not an underrun */
  1763. /* This is a good status and what we expect */
  1764. break;
  1765. default:
  1766. /* Data overrun, system error, or unknown */
  1767. ep->req_pending = 0;
  1768. ep_err(ep, "DMA critical EP error: System error (0x%x)!\n",
  1769. status);
  1770. done(ep, req, -ECONNABORTED);
  1771. return;
  1772. }
  1773. /* ISO endpoints are handled differently */
  1774. if (ep->eptype == EP_ISO_TYPE) {
  1775. if (ep->is_in)
  1776. req->req.actual = req->req.length;
  1777. else
  1778. req->req.actual = dd->iso_status[0] & 0xFFFF;
  1779. } else
  1780. req->req.actual += DD_STATUS_CURDMACNT(status);
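/* For non-ISO transfers the retired DD reports how many bytes the DMA
 * engine actually transferred */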
  1781. /* Send a ZLP if necessary. This will be done for non-int
  1782. * packets which have a size that is a divisor of MAXP */
  1783. if (req->send_zlp) {
  1784. /*
  1785. * If at least 1 buffer is available, send the ZLP now.
  1786. * Otherwise, the ZLP send needs to be deferred until a
  1787. * buffer is available.
  1788. */
  1789. if (udc_clearep_getsts(udc, ep->hwep_num) & EP_SEL_F) {
  1790. udc_clearep_getsts(udc, ep->hwep_num);
  1791. uda_enable_hwepint(udc, ep->hwep_num);
  1792. epstatus = udc_clearep_getsts(udc, ep->hwep_num);
  1793. /* Let the EP interrupt handle the ZLP */
  1794. return;
  1795. } else
  1796. udc_send_in_zlp(udc, ep);
  1797. }
  1798. /* Transfer request is complete */
  1799. done(ep, req, 0);
  1800. /* Start another request if ready */
  1801. udc_clearep_getsts(udc, ep->hwep_num);
  1802. if (!list_empty((&ep->queue))) {
  1803. if (ep->is_in)
  1804. udc_ep_in_req_dma(udc, ep);
  1805. else
  1806. udc_ep_out_req_dma(udc, ep);
  1807. } else
  1808. ep->req_pending = 0;
  1809. }
  1810. /*
  1811. *
  1812. * Endpoint 0 functions
  1813. *
  1814. */
  1815. static void udc_handle_dev(struct lpc32xx_udc *udc)
  1816. {
  1817. u32 tmp;
  1818. udc_protocol_cmd_w(udc, CMD_GET_DEV_STAT);
  1819. tmp = udc_protocol_cmd_r(udc, DAT_GET_DEV_STAT);
  1820. if (tmp & DEV_RST)
  1821. uda_usb_reset(udc);
  1822. else if (tmp & DEV_CON_CH)
  1823. uda_power_event(udc, (tmp & DEV_CON));
  1824. else if (tmp & DEV_SUS_CH) {
  1825. if (tmp & DEV_SUS) {
  1826. if (udc->vbus == 0)
  1827. stop_activity(udc);
  1828. else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
  1829. udc->driver) {
  1830. /* Power down transceiver */
  1831. udc->poweron = 0;
  1832. schedule_work(&udc->pullup_job);
  1833. uda_resm_susp_event(udc, 1);
  1834. }
  1835. } else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
  1836. udc->driver && udc->vbus) {
  1837. uda_resm_susp_event(udc, 0);
  1838. /* Power up transceiver */
  1839. udc->poweron = 1;
  1840. schedule_work(&udc->pullup_job);
  1841. }
  1842. }
  1843. }
  1844. static int udc_get_status(struct lpc32xx_udc *udc, u16 reqtype, u16 wIndex)
  1845. {
  1846. struct lpc32xx_ep *ep;
  1847. u32 ep0buff = 0, tmp;
  1848. switch (reqtype & USB_RECIP_MASK) {
  1849. case USB_RECIP_INTERFACE:
  1850. break; /* Not supported */
  1851. case USB_RECIP_DEVICE:
  1852. ep0buff = (udc->selfpowered << USB_DEVICE_SELF_POWERED);
  1853. if (udc->dev_status & (1 << USB_DEVICE_REMOTE_WAKEUP))
  1854. ep0buff |= (1 << USB_DEVICE_REMOTE_WAKEUP);
  1855. break;
  1856. case USB_RECIP_ENDPOINT:
1857. tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
1858. if ((tmp == 0) || (tmp >= NUM_ENDPOINTS))
1859. return -EOPNOTSUPP;
1860. ep = &udc->ep[tmp];
  1861. if (wIndex & USB_DIR_IN) {
  1862. if (!ep->is_in)
  1863. return -EOPNOTSUPP; /* Something's wrong */
  1864. } else if (ep->is_in)
  1865. return -EOPNOTSUPP; /* Not an IN endpoint */
  1866. /* Get status of the endpoint */
  1867. udc_protocol_cmd_w(udc, CMD_SEL_EP(ep->hwep_num));
  1868. tmp = udc_protocol_cmd_r(udc, DAT_SEL_EP(ep->hwep_num));
  1869. if (tmp & EP_SEL_ST)
  1870. ep0buff = (1 << USB_ENDPOINT_HALT);
  1871. else
  1872. ep0buff = 0;
  1873. break;
  1874. default:
  1875. break;
  1876. }
  1877. /* Return data */
  1878. udc_write_hwep(udc, EP_IN, &ep0buff, 2);
  1879. return 0;
  1880. }
  1881. static void udc_handle_ep0_setup(struct lpc32xx_udc *udc)
  1882. {
  1883. struct lpc32xx_ep *ep, *ep0 = &udc->ep[0];
  1884. struct usb_ctrlrequest ctrlpkt;
  1885. int i, bytes;
  1886. u16 wIndex, wValue, wLength, reqtype, req, tmp;
  1887. /* Nuke previous transfers */
  1888. nuke(ep0, -EPROTO);
  1889. /* Get setup packet */
  1890. bytes = udc_read_hwep(udc, EP_OUT, (u32 *) &ctrlpkt, 8);
  1891. if (bytes != 8) {
  1892. ep_warn(ep0, "Incorrectly sized setup packet (s/b 8, is %d)!\n",
  1893. bytes);
  1894. return;
  1895. }
  1896. /* Native endianness */
  1897. wIndex = le16_to_cpu(ctrlpkt.wIndex);
  1898. wValue = le16_to_cpu(ctrlpkt.wValue);
  1899. wLength = le16_to_cpu(ctrlpkt.wLength);
  1900. reqtype = le16_to_cpu(ctrlpkt.bRequestType);
  1901. /* Set direction of EP0 */
  1902. if (likely(reqtype & USB_DIR_IN))
  1903. ep0->is_in = 1;
  1904. else
  1905. ep0->is_in = 0;
  1906. /* Handle SETUP packet */
  1907. req = le16_to_cpu(ctrlpkt.bRequest);
  1908. switch (req) {
  1909. case USB_REQ_CLEAR_FEATURE:
  1910. case USB_REQ_SET_FEATURE:
  1911. switch (reqtype) {
  1912. case (USB_TYPE_STANDARD | USB_RECIP_DEVICE):
  1913. if (wValue != USB_DEVICE_REMOTE_WAKEUP)
  1914. goto stall; /* Nothing else handled */
  1915. /* Tell board about event */
  1916. if (req == USB_REQ_CLEAR_FEATURE)
  1917. udc->dev_status &=
  1918. ~(1 << USB_DEVICE_REMOTE_WAKEUP);
  1919. else
  1920. udc->dev_status |=
  1921. (1 << USB_DEVICE_REMOTE_WAKEUP);
  1922. uda_remwkp_cgh(udc);
  1923. goto zlp_send;
  1924. case (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
  1925. tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
  1926. if ((wValue != USB_ENDPOINT_HALT) ||
  1927. (tmp >= NUM_ENDPOINTS))
  1928. break;
  1929. /* Find hardware endpoint from logical endpoint */
  1930. ep = &udc->ep[tmp];
  1931. tmp = ep->hwep_num;
  1932. if (tmp == 0)
  1933. break;
  1934. if (req == USB_REQ_SET_FEATURE)
  1935. udc_stall_hwep(udc, tmp);
  1936. else if (!ep->wedge)
  1937. udc_clrstall_hwep(udc, tmp);
  1938. goto zlp_send;
  1939. default:
  1940. break;
  1941. }
break;
1942. case USB_REQ_SET_ADDRESS:
  1943. if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
  1944. udc_set_address(udc, wValue);
  1945. goto zlp_send;
  1946. }
  1947. break;
  1948. case USB_REQ_GET_STATUS:
  1949. udc_get_status(udc, reqtype, wIndex);
  1950. return;
  1951. default:
  1952. break; /* Let GadgetFS handle the descriptor instead */
  1953. }
  1954. if (likely(udc->driver)) {
  1955. /* device-2-host (IN) or no data setup command, process
  1956. * immediately */
  1957. spin_unlock(&udc->lock);
  1958. i = udc->driver->setup(&udc->gadget, &ctrlpkt);
  1959. spin_lock(&udc->lock);
  1960. if (req == USB_REQ_SET_CONFIGURATION) {
  1961. /* Configuration is set after endpoints are realized */
  1962. if (wValue) {
  1963. /* Set configuration */
  1964. udc_set_device_configured(udc);
  1965. udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
  1966. DAT_WR_BYTE(AP_CLK |
  1967. INAK_BI | INAK_II));
  1968. } else {
  1969. /* Clear configuration */
  1970. udc_set_device_unconfigured(udc);
  1971. /* Disable NAK interrupts */
  1972. udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
  1973. DAT_WR_BYTE(AP_CLK));
  1974. }
  1975. }
  1976. if (i < 0) {
  1977. /* setup processing failed, force stall */
  1978. dev_dbg(udc->dev,
  1979. "req %02x.%02x protocol STALL; stat %d\n",
  1980. reqtype, req, i);
  1981. udc->ep0state = WAIT_FOR_SETUP;
  1982. goto stall;
  1983. }
  1984. }
  1985. if (!ep0->is_in)
  1986. udc_ep0_send_zlp(udc); /* ZLP IN packet on data phase */
  1987. return;
  1988. stall:
  1989. udc_stall_hwep(udc, EP_IN);
  1990. return;
  1991. zlp_send:
  1992. udc_ep0_send_zlp(udc);
  1993. return;
  1994. }
  1995. /* IN endpoint 0 transfer */
  1996. static void udc_handle_ep0_in(struct lpc32xx_udc *udc)
  1997. {
  1998. struct lpc32xx_ep *ep0 = &udc->ep[0];
  1999. u32 epstatus;
  2000. /* Clear EP interrupt */
  2001. epstatus = udc_clearep_getsts(udc, EP_IN);
  2002. #ifdef CONFIG_USB_GADGET_DEBUG_FILES
  2003. ep0->totalints++;
  2004. #endif
  2005. /* Stalled? Clear stall and reset buffers */
  2006. if (epstatus & EP_SEL_ST) {
  2007. udc_clrstall_hwep(udc, EP_IN);
  2008. nuke(ep0, -ECONNABORTED);
  2009. udc->ep0state = WAIT_FOR_SETUP;
  2010. return;
  2011. }
  2012. /* Is a buffer available? */
  2013. if (!(epstatus & EP_SEL_F)) {
  2014. /* Handle based on current state */
  2015. if (udc->ep0state == DATA_IN)
  2016. udc_ep0_in_req(udc);
  2017. else {
2018. /* Unknown state for EP0 or end of DATA IN phase */
  2019. nuke(ep0, -ECONNABORTED);
  2020. udc->ep0state = WAIT_FOR_SETUP;
  2021. }
  2022. }
  2023. }
  2024. /* OUT endpoint 0 transfer */
  2025. static void udc_handle_ep0_out(struct lpc32xx_udc *udc)
  2026. {
  2027. struct lpc32xx_ep *ep0 = &udc->ep[0];
  2028. u32 epstatus;
  2029. /* Clear EP interrupt */
  2030. epstatus = udc_clearep_getsts(udc, EP_OUT);
  2031. #ifdef CONFIG_USB_GADGET_DEBUG_FILES
  2032. ep0->totalints++;
  2033. #endif
  2034. /* Stalled? */
  2035. if (epstatus & EP_SEL_ST) {
  2036. udc_clrstall_hwep(udc, EP_OUT);
  2037. nuke(ep0, -ECONNABORTED);
  2038. udc->ep0state = WAIT_FOR_SETUP;
  2039. return;
  2040. }
  2041. /* A NAK may occur if a packet couldn't be received yet */
  2042. if (epstatus & EP_SEL_EPN)
  2043. return;
  2044. /* Setup packet incoming? */
  2045. if (epstatus & EP_SEL_STP) {
  2046. nuke(ep0, 0);
  2047. udc->ep0state = WAIT_FOR_SETUP;
  2048. }
  2049. /* Data available? */
  2050. if (epstatus & EP_SEL_F)
  2051. /* Handle based on current state */
  2052. switch (udc->ep0state) {
  2053. case WAIT_FOR_SETUP:
  2054. udc_handle_ep0_setup(udc);
  2055. break;
  2056. case DATA_OUT:
  2057. udc_ep0_out_req(udc);
  2058. break;
  2059. default:
  2060. /* Unknown state for EP0 */
  2061. nuke(ep0, -ECONNABORTED);
  2062. udc->ep0state = WAIT_FOR_SETUP;
  2063. }
  2064. }
  2065. /* Must be called without lock */
  2066. static int lpc32xx_get_frame(struct usb_gadget *gadget)
  2067. {
  2068. int frame;
  2069. unsigned long flags;
  2070. struct lpc32xx_udc *udc = to_udc(gadget);
  2071. if (!udc->clocked)
  2072. return -EINVAL;
  2073. spin_lock_irqsave(&udc->lock, flags);
  2074. frame = (int) udc_get_current_frame(udc);
  2075. spin_unlock_irqrestore(&udc->lock, flags);
  2076. return frame;
  2077. }
  2078. static int lpc32xx_wakeup(struct usb_gadget *gadget)
  2079. {
  2080. return -ENOTSUPP;
  2081. }
  2082. static int lpc32xx_set_selfpowered(struct usb_gadget *gadget, int is_on)
  2083. {
  2084. struct lpc32xx_udc *udc = to_udc(gadget);
  2085. /* Always self-powered */
  2086. udc->selfpowered = (is_on != 0);
  2087. return 0;
  2088. }
  2089. /*
  2090. * vbus is here! turn everything on that's ready
  2091. * Must be called without lock
  2092. */
  2093. static int lpc32xx_vbus_session(struct usb_gadget *gadget, int is_active)
  2094. {
  2095. unsigned long flags;
  2096. struct lpc32xx_udc *udc = to_udc(gadget);
  2097. spin_lock_irqsave(&udc->lock, flags);
  2098. /* Doesn't need lock */
  2099. if (udc->driver) {
  2100. udc_clk_set(udc, 1);
  2101. udc_enable(udc);
  2102. pullup(udc, is_active);
  2103. } else {
  2104. stop_activity(udc);
  2105. pullup(udc, 0);
  2106. spin_unlock_irqrestore(&udc->lock, flags);
  2107. /*
  2108. * Wait for all the endpoints to disable,
  2109. * before disabling clocks. Don't wait if
  2110. * endpoints are not enabled.
  2111. */
  2112. if (atomic_read(&udc->enabled_ep_cnt))
  2113. wait_event_interruptible(udc->ep_disable_wait_queue,
  2114. (atomic_read(&udc->enabled_ep_cnt) == 0));
  2115. spin_lock_irqsave(&udc->lock, flags);
  2116. udc_clk_set(udc, 0);
  2117. }
  2118. spin_unlock_irqrestore(&udc->lock, flags);
  2119. return 0;
  2120. }
  2121. /* Can be called with or without lock */
  2122. static int lpc32xx_pullup(struct usb_gadget *gadget, int is_on)
  2123. {
  2124. struct lpc32xx_udc *udc = to_udc(gadget);
  2125. /* Doesn't need lock */
  2126. pullup(udc, is_on);
  2127. return 0;
  2128. }
  2129. static int lpc32xx_start(struct usb_gadget *, struct usb_gadget_driver *);
  2130. static int lpc32xx_stop(struct usb_gadget *);
  2131. static const struct usb_gadget_ops lpc32xx_udc_ops = {
  2132. .get_frame = lpc32xx_get_frame,
  2133. .wakeup = lpc32xx_wakeup,
  2134. .set_selfpowered = lpc32xx_set_selfpowered,
  2135. .vbus_session = lpc32xx_vbus_session,
  2136. .pullup = lpc32xx_pullup,
  2137. .udc_start = lpc32xx_start,
  2138. .udc_stop = lpc32xx_stop,
  2139. };
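/*
 * udc_start/udc_stop are invoked by the UDC core when a gadget function
 * driver is bound to or unbound from this controller; the remaining ops
 * back the usb_gadget_*() helpers used by gadget drivers.
 */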
  2140. static void nop_release(struct device *dev)
  2141. {
  2142. /* nothing to free */
  2143. }
  2144. static const struct lpc32xx_udc controller_template = {
  2145. .gadget = {
  2146. .ops = &lpc32xx_udc_ops,
  2147. .name = driver_name,
  2148. .dev = {
  2149. .init_name = "gadget",
  2150. .release = nop_release,
  2151. }
  2152. },
  2153. .ep[0] = {
  2154. .ep = {
  2155. .name = "ep0",
  2156. .ops = &lpc32xx_ep_ops,
  2157. },
  2158. .maxpacket = 64,
  2159. .hwep_num_base = 0,
  2160. .hwep_num = 0, /* Can be 0 or 1, has special handling */
  2161. .lep = 0,
  2162. .eptype = EP_CTL_TYPE,
  2163. },
  2164. .ep[1] = {
  2165. .ep = {
  2166. .name = "ep1-int",
  2167. .ops = &lpc32xx_ep_ops,
  2168. },
  2169. .maxpacket = 64,
  2170. .hwep_num_base = 2,
  2171. .hwep_num = 0, /* 2 or 3, will be set later */
  2172. .lep = 1,
  2173. .eptype = EP_INT_TYPE,
  2174. },
  2175. .ep[2] = {
  2176. .ep = {
  2177. .name = "ep2-bulk",
  2178. .ops = &lpc32xx_ep_ops,
  2179. },
  2180. .maxpacket = 64,
  2181. .hwep_num_base = 4,
  2182. .hwep_num = 0, /* 4 or 5, will be set later */
  2183. .lep = 2,
  2184. .eptype = EP_BLK_TYPE,
  2185. },
  2186. .ep[3] = {
  2187. .ep = {
  2188. .name = "ep3-iso",
  2189. .ops = &lpc32xx_ep_ops,
  2190. },
  2191. .maxpacket = 1023,
  2192. .hwep_num_base = 6,
  2193. .hwep_num = 0, /* 6 or 7, will be set later */
  2194. .lep = 3,
  2195. .eptype = EP_ISO_TYPE,
  2196. },
  2197. .ep[4] = {
  2198. .ep = {
  2199. .name = "ep4-int",
  2200. .ops = &lpc32xx_ep_ops,
  2201. },
  2202. .maxpacket = 64,
  2203. .hwep_num_base = 8,
  2204. .hwep_num = 0, /* 8 or 9, will be set later */
  2205. .lep = 4,
  2206. .eptype = EP_INT_TYPE,
  2207. },
  2208. .ep[5] = {
  2209. .ep = {
  2210. .name = "ep5-bulk",
  2211. .ops = &lpc32xx_ep_ops,
  2212. },
  2213. .maxpacket = 64,
  2214. .hwep_num_base = 10,
  2215. .hwep_num = 0, /* 10 or 11, will be set later */
  2216. .lep = 5,
  2217. .eptype = EP_BLK_TYPE,
  2218. },
  2219. .ep[6] = {
  2220. .ep = {
  2221. .name = "ep6-iso",
  2222. .ops = &lpc32xx_ep_ops,
  2223. },
  2224. .maxpacket = 1023,
  2225. .hwep_num_base = 12,
  2226. .hwep_num = 0, /* 12 or 13, will be set later */
  2227. .lep = 6,
  2228. .eptype = EP_ISO_TYPE,
  2229. },
  2230. .ep[7] = {
  2231. .ep = {
  2232. .name = "ep7-int",
  2233. .ops = &lpc32xx_ep_ops,
  2234. },
  2235. .maxpacket = 64,
  2236. .hwep_num_base = 14,
  2237. .hwep_num = 0,
  2238. .lep = 7,
  2239. .eptype = EP_INT_TYPE,
  2240. },
  2241. .ep[8] = {
  2242. .ep = {
  2243. .name = "ep8-bulk",
  2244. .ops = &lpc32xx_ep_ops,
  2245. },
  2246. .maxpacket = 64,
  2247. .hwep_num_base = 16,
  2248. .hwep_num = 0,
  2249. .lep = 8,
  2250. .eptype = EP_BLK_TYPE,
  2251. },
  2252. .ep[9] = {
  2253. .ep = {
  2254. .name = "ep9-iso",
  2255. .ops = &lpc32xx_ep_ops,
  2256. },
  2257. .maxpacket = 1023,
  2258. .hwep_num_base = 18,
  2259. .hwep_num = 0,
  2260. .lep = 9,
  2261. .eptype = EP_ISO_TYPE,
  2262. },
  2263. .ep[10] = {
  2264. .ep = {
  2265. .name = "ep10-int",
  2266. .ops = &lpc32xx_ep_ops,
  2267. },
  2268. .maxpacket = 64,
  2269. .hwep_num_base = 20,
  2270. .hwep_num = 0,
  2271. .lep = 10,
  2272. .eptype = EP_INT_TYPE,
  2273. },
  2274. .ep[11] = {
  2275. .ep = {
  2276. .name = "ep11-bulk",
  2277. .ops = &lpc32xx_ep_ops,
  2278. },
  2279. .maxpacket = 64,
  2280. .hwep_num_base = 22,
  2281. .hwep_num = 0,
  2282. .lep = 11,
  2283. .eptype = EP_BLK_TYPE,
  2284. },
  2285. .ep[12] = {
  2286. .ep = {
  2287. .name = "ep12-iso",
  2288. .ops = &lpc32xx_ep_ops,
  2289. },
  2290. .maxpacket = 1023,
  2291. .hwep_num_base = 24,
  2292. .hwep_num = 0,
  2293. .lep = 12,
  2294. .eptype = EP_ISO_TYPE,
  2295. },
  2296. .ep[13] = {
  2297. .ep = {
  2298. .name = "ep13-int",
  2299. .ops = &lpc32xx_ep_ops,
  2300. },
  2301. .maxpacket = 64,
  2302. .hwep_num_base = 26,
  2303. .hwep_num = 0,
  2304. .lep = 13,
  2305. .eptype = EP_INT_TYPE,
  2306. },
  2307. .ep[14] = {
  2308. .ep = {
  2309. .name = "ep14-bulk",
  2310. .ops = &lpc32xx_ep_ops,
  2311. },
  2312. .maxpacket = 64,
  2313. .hwep_num_base = 28,
  2314. .hwep_num = 0,
  2315. .lep = 14,
  2316. .eptype = EP_BLK_TYPE,
  2317. },
  2318. .ep[15] = {
  2319. .ep = {
  2320. .name = "ep15-bulk",
  2321. .ops = &lpc32xx_ep_ops,
  2322. },
  2323. .maxpacket = 1023,
  2324. .hwep_num_base = 30,
  2325. .hwep_num = 0,
  2326. .lep = 15,
  2327. .eptype = EP_BLK_TYPE,
  2328. },
  2329. };
  2330. /* ISO and status interrupts */
  2331. static irqreturn_t lpc32xx_usb_lp_irq(int irq, void *_udc)
  2332. {
  2333. u32 tmp, devstat;
  2334. struct lpc32xx_udc *udc = _udc;
  2335. spin_lock(&udc->lock);
  2336. /* Read the device status register */
  2337. devstat = readl(USBD_DEVINTST(udc->udp_baseaddr));
  2338. devstat &= ~USBD_EP_FAST;
  2339. writel(devstat, USBD_DEVINTCLR(udc->udp_baseaddr));
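/* USBD_EP_FAST is masked out and deliberately not acknowledged here so
 * that endpoint events stay pending for the high-priority IRQ handler */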
  2340. devstat = devstat & udc->enabled_devints;
  2341. /* Device specific handling needed? */
  2342. if (devstat & USBD_DEV_STAT)
  2343. udc_handle_dev(udc);
  2344. /* Start of frame? (devstat & FRAME_INT):
  2345. * The frame interrupt isn't really needed for ISO support,
  2346. * as the driver will queue the necessary packets */
  2347. /* Error? */
  2348. if (devstat & ERR_INT) {
  2349. /* All types of errors, from cable removal during transfer to
2350. * misc protocol and bit errors. These are mostly just for info,
2351. * as the USB hardware will work around these. If these errors
2352. * happen a lot, something is wrong. */
  2353. udc_protocol_cmd_w(udc, CMD_RD_ERR_STAT);
  2354. tmp = udc_protocol_cmd_r(udc, DAT_RD_ERR_STAT);
  2355. dev_dbg(udc->dev, "Device error (0x%x)!\n", tmp);
  2356. }
  2357. spin_unlock(&udc->lock);
  2358. return IRQ_HANDLED;
  2359. }
  2360. /* EP interrupts */
  2361. static irqreturn_t lpc32xx_usb_hp_irq(int irq, void *_udc)
  2362. {
  2363. u32 tmp;
  2364. struct lpc32xx_udc *udc = _udc;
  2365. spin_lock(&udc->lock);
  2366. /* Read the device status register */
  2367. writel(USBD_EP_FAST, USBD_DEVINTCLR(udc->udp_baseaddr));
  2368. /* Endpoints */
  2369. tmp = readl(USBD_EPINTST(udc->udp_baseaddr));
  2370. /* Special handling for EP0 */
  2371. if (tmp & (EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
  2372. /* Handle EP0 IN */
  2373. if (tmp & (EP_MASK_SEL(0, EP_IN)))
  2374. udc_handle_ep0_in(udc);
  2375. /* Handle EP0 OUT */
  2376. if (tmp & (EP_MASK_SEL(0, EP_OUT)))
  2377. udc_handle_ep0_out(udc);
  2378. }
  2379. /* All other EPs */
  2380. if (tmp & ~(EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
  2381. int i;
  2382. /* Handle other EP interrupts */
  2383. for (i = 1; i < NUM_ENDPOINTS; i++) {
  2384. if (tmp & (1 << udc->ep[i].hwep_num))
  2385. udc_handle_eps(udc, &udc->ep[i]);
  2386. }
  2387. }
  2388. spin_unlock(&udc->lock);
  2389. return IRQ_HANDLED;
  2390. }
  2391. static irqreturn_t lpc32xx_usb_devdma_irq(int irq, void *_udc)
  2392. {
  2393. struct lpc32xx_udc *udc = _udc;
  2394. int i;
  2395. u32 tmp;
  2396. spin_lock(&udc->lock);
  2397. /* Handle EP DMA EOT interrupts */
  2398. tmp = readl(USBD_EOTINTST(udc->udp_baseaddr)) |
  2399. (readl(USBD_EPDMAST(udc->udp_baseaddr)) &
  2400. readl(USBD_NDDRTINTST(udc->udp_baseaddr))) |
  2401. readl(USBD_SYSERRTINTST(udc->udp_baseaddr));
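/* Any endpoint flagged with an end-of-transfer, a new-DD-request (while
 * its DMA is enabled) or a system error gets its DMA completion handler
 * run below */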
  2402. for (i = 1; i < NUM_ENDPOINTS; i++) {
  2403. if (tmp & (1 << udc->ep[i].hwep_num))
  2404. udc_handle_dma_ep(udc, &udc->ep[i]);
  2405. }
  2406. spin_unlock(&udc->lock);
  2407. return IRQ_HANDLED;
  2408. }
  2409. /*
  2410. *
  2411. * VBUS detection, pullup handler, and Gadget cable state notification
  2412. *
  2413. */
  2414. static void vbus_work(struct work_struct *work)
  2415. {
  2416. u8 value;
  2417. struct lpc32xx_udc *udc = container_of(work, struct lpc32xx_udc,
  2418. vbus_job);
  2419. if (udc->enabled != 0) {
  2420. /* Discharge VBUS real quick */
  2421. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  2422. ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
2423. /* Give VBUS some time (100 ms) to discharge */
  2424. msleep(100);
  2425. /* Disable VBUS discharge resistor */
  2426. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  2427. ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
  2428. OTG1_VBUS_DISCHRG);
  2429. /* Clear interrupt */
  2430. i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
  2431. ISP1301_I2C_INTERRUPT_LATCH |
  2432. ISP1301_I2C_REG_CLEAR_ADDR, ~0);
  2433. /* Get the VBUS status from the transceiver */
  2434. value = i2c_smbus_read_byte_data(udc->isp1301_i2c_client,
  2435. ISP1301_I2C_INTERRUPT_SOURCE);
  2436. /* VBUS on or off? */
  2437. if (value & INT_SESS_VLD)
  2438. udc->vbus = 1;
  2439. else
  2440. udc->vbus = 0;
  2441. /* VBUS changed? */
  2442. if (udc->last_vbus != udc->vbus) {
  2443. udc->last_vbus = udc->vbus;
  2444. lpc32xx_vbus_session(&udc->gadget, udc->vbus);
  2445. }
  2446. }
  2447. /* Re-enable after completion */
  2448. enable_irq(udc->udp_irq[IRQ_USB_ATX]);
  2449. }
  2450. static irqreturn_t lpc32xx_usb_vbus_irq(int irq, void *_udc)
  2451. {
  2452. struct lpc32xx_udc *udc = _udc;
  2453. /* Defer handling of VBUS IRQ to work queue */
  2454. disable_irq_nosync(udc->udp_irq[IRQ_USB_ATX]);
  2455. schedule_work(&udc->vbus_job);
  2456. return IRQ_HANDLED;
  2457. }
  2458. static int lpc32xx_start(struct usb_gadget *gadget,
  2459. struct usb_gadget_driver *driver)
  2460. {
  2461. struct lpc32xx_udc *udc = to_udc(gadget);
  2462. int i;
  2463. if (!driver || driver->max_speed < USB_SPEED_FULL || !driver->setup) {
  2464. dev_err(udc->dev, "bad parameter.\n");
  2465. return -EINVAL;
  2466. }
  2467. if (udc->driver) {
  2468. dev_err(udc->dev, "UDC already has a gadget driver\n");
  2469. return -EBUSY;
  2470. }
  2471. udc->driver = driver;
  2472. udc->gadget.dev.of_node = udc->dev->of_node;
  2473. udc->enabled = 1;
  2474. udc->selfpowered = 1;
  2475. udc->vbus = 0;
  2476. /* Force VBUS process once to check for cable insertion */
  2477. udc->last_vbus = udc->vbus = 0;
  2478. schedule_work(&udc->vbus_job);
  2479. /* Do not re-enable ATX IRQ (3) */
  2480. for (i = IRQ_USB_LP; i < IRQ_USB_ATX; i++)
  2481. enable_irq(udc->udp_irq[i]);
  2482. return 0;
  2483. }
  2484. static int lpc32xx_stop(struct usb_gadget *gadget)
  2485. {
  2486. int i;
  2487. struct lpc32xx_udc *udc = to_udc(gadget);
  2488. for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
  2489. disable_irq(udc->udp_irq[i]);
  2490. if (udc->clocked) {
  2491. spin_lock(&udc->lock);
  2492. stop_activity(udc);
  2493. spin_unlock(&udc->lock);
  2494. /*
  2495. * Wait for all the endpoints to disable,
  2496. * before disabling clocks. Don't wait if
  2497. * endpoints are not enabled.
  2498. */
  2499. if (atomic_read(&udc->enabled_ep_cnt))
  2500. wait_event_interruptible(udc->ep_disable_wait_queue,
  2501. (atomic_read(&udc->enabled_ep_cnt) == 0));
  2502. spin_lock(&udc->lock);
  2503. udc_clk_set(udc, 0);
  2504. spin_unlock(&udc->lock);
  2505. }
  2506. udc->enabled = 0;
  2507. udc->driver = NULL;
  2508. return 0;
  2509. }
  2510. static void lpc32xx_udc_shutdown(struct platform_device *dev)
  2511. {
  2512. /* Force disconnect on reboot */
  2513. struct lpc32xx_udc *udc = platform_get_drvdata(dev);
  2514. pullup(udc, 0);
  2515. }
  2516. /*
  2517. * Callbacks to be overridden by options passed via OF (TODO)
  2518. */
  2519. static void lpc32xx_usbd_conn_chg(int conn)
  2520. {
  2521. /* Do nothing, it might be nice to enable an LED
  2522. * based on conn state being !0 */
  2523. }
  2524. static void lpc32xx_usbd_susp_chg(int susp)
  2525. {
  2526. /* Device suspend if susp != 0 */
  2527. }
  2528. static void lpc32xx_rmwkup_chg(int remote_wakup_enable)
  2529. {
  2530. /* Enable or disable USB remote wakeup */
  2531. }
  2532. struct lpc32xx_usbd_cfg lpc32xx_usbddata = {
  2533. .vbus_drv_pol = 0,
  2534. .conn_chgb = &lpc32xx_usbd_conn_chg,
  2535. .susp_chgb = &lpc32xx_usbd_susp_chg,
  2536. .rmwk_chgb = &lpc32xx_rmwkup_chg,
  2537. };
  2538. static u64 lpc32xx_usbd_dmamask = ~(u32) 0x7F;
  2539. static int lpc32xx_udc_probe(struct platform_device *pdev)
  2540. {
  2541. struct device *dev = &pdev->dev;
  2542. struct lpc32xx_udc *udc;
  2543. int retval, i;
  2544. struct resource *res;
  2545. dma_addr_t dma_handle;
  2546. struct device_node *isp1301_node;
  2547. udc = kmemdup(&controller_template, sizeof(*udc), GFP_KERNEL);
  2548. if (!udc)
  2549. return -ENOMEM;
  2550. for (i = 0; i <= 15; i++)
  2551. udc->ep[i].udc = udc;
  2552. udc->gadget.ep0 = &udc->ep[0].ep;
  2553. /* init software state */
  2554. udc->gadget.dev.parent = dev;
  2555. udc->pdev = pdev;
  2556. udc->dev = &pdev->dev;
  2557. udc->enabled = 0;
  2558. if (pdev->dev.of_node) {
  2559. isp1301_node = of_parse_phandle(pdev->dev.of_node,
  2560. "transceiver", 0);
  2561. } else {
  2562. isp1301_node = NULL;
  2563. }
  2564. udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
  2565. if (!udc->isp1301_i2c_client) {
  2566. retval = -EPROBE_DEFER;
  2567. goto phy_fail;
  2568. }
  2569. dev_info(udc->dev, "ISP1301 I2C device at address 0x%x\n",
  2570. udc->isp1301_i2c_client->addr);
  2571. pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
  2572. retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
  2573. if (retval)
  2574. goto resource_fail;
  2575. udc->board = &lpc32xx_usbddata;
  2576. /*
  2577. * Resources are mapped as follows:
  2578. * IORESOURCE_MEM, base address and size of USB space
  2579. * IORESOURCE_IRQ, USB device low priority interrupt number
  2580. * IORESOURCE_IRQ, USB device high priority interrupt number
  2581. * IORESOURCE_IRQ, USB device interrupt number
  2582. * IORESOURCE_IRQ, USB transceiver interrupt number
  2583. */
  2584. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2585. if (!res) {
  2586. retval = -ENXIO;
  2587. goto resource_fail;
  2588. }
  2589. spin_lock_init(&udc->lock);
  2590. /* Get IRQs */
  2591. for (i = 0; i < 4; i++) {
  2592. udc->udp_irq[i] = platform_get_irq(pdev, i);
  2593. if (udc->udp_irq[i] < 0) {
  2594. dev_err(udc->dev,
  2595. "irq resource %d not available!\n", i);
  2596. retval = udc->udp_irq[i];
  2597. goto irq_fail;
  2598. }
  2599. }
  2600. udc->io_p_start = res->start;
  2601. udc->io_p_size = resource_size(res);
  2602. if (!request_mem_region(udc->io_p_start, udc->io_p_size, driver_name)) {
  2603. dev_err(udc->dev, "someone's using UDC memory\n");
  2604. retval = -EBUSY;
  2605. goto request_mem_region_fail;
  2606. }
  2607. udc->udp_baseaddr = ioremap(udc->io_p_start, udc->io_p_size);
  2608. if (!udc->udp_baseaddr) {
  2609. retval = -ENOMEM;
  2610. dev_err(udc->dev, "IO map failure\n");
  2611. goto io_map_fail;
  2612. }
  2613. /* Enable AHB slave USB clock, needed for further USB clock control */
  2614. writel(USB_SLAVE_HCLK_EN | (1 << 19), USB_CTRL);
  2615. /* Get required clocks */
  2616. udc->usb_pll_clk = clk_get(&pdev->dev, "ck_pll5");
  2617. if (IS_ERR(udc->usb_pll_clk)) {
  2618. dev_err(udc->dev, "failed to acquire USB PLL\n");
  2619. retval = PTR_ERR(udc->usb_pll_clk);
  2620. goto pll_get_fail;
  2621. }
  2622. udc->usb_slv_clk = clk_get(&pdev->dev, "ck_usbd");
  2623. if (IS_ERR(udc->usb_slv_clk)) {
  2624. dev_err(udc->dev, "failed to acquire USB device clock\n");
  2625. retval = PTR_ERR(udc->usb_slv_clk);
  2626. goto usb_clk_get_fail;
  2627. }
  2628. udc->usb_otg_clk = clk_get(&pdev->dev, "ck_usb_otg");
  2629. if (IS_ERR(udc->usb_otg_clk)) {
  2630. dev_err(udc->dev, "failed to acquire USB otg clock\n");
  2631. retval = PTR_ERR(udc->usb_otg_clk);
  2632. goto usb_otg_clk_get_fail;
  2633. }
  2634. /* Setup PLL clock to 48MHz */
  2635. retval = clk_enable(udc->usb_pll_clk);
  2636. if (retval < 0) {
  2637. dev_err(udc->dev, "failed to start USB PLL\n");
  2638. goto pll_enable_fail;
  2639. }
  2640. retval = clk_set_rate(udc->usb_pll_clk, 48000);
  2641. if (retval < 0) {
  2642. dev_err(udc->dev, "failed to set USB clock rate\n");
  2643. goto pll_set_fail;
  2644. }
  2645. writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN, USB_CTRL);
  2646. /* Enable USB device clock */
  2647. retval = clk_enable(udc->usb_slv_clk);
  2648. if (retval < 0) {
  2649. dev_err(udc->dev, "failed to start USB device clock\n");
  2650. goto usb_clk_enable_fail;
  2651. }
  2652. /* Enable USB OTG clock */
  2653. retval = clk_enable(udc->usb_otg_clk);
  2654. if (retval < 0) {
  2655. dev_err(udc->dev, "failed to start USB otg clock\n");
  2656. goto usb_otg_clk_enable_fail;
  2657. }
  2658. /* Setup deferred workqueue data */
  2659. udc->poweron = udc->pullup = 0;
  2660. INIT_WORK(&udc->pullup_job, pullup_work);
  2661. INIT_WORK(&udc->vbus_job, vbus_work);
  2662. #ifdef CONFIG_PM
  2663. INIT_WORK(&udc->power_job, power_work);
  2664. #endif
  2665. /* All clocks are now on */
  2666. udc->clocked = 1;
  2667. isp1301_udc_configure(udc);
  2668. /* Allocate memory for the UDCA */
  2669. udc->udca_v_base = dma_alloc_coherent(&pdev->dev, UDCA_BUFF_SIZE,
  2670. &dma_handle,
  2671. (GFP_KERNEL | GFP_DMA));
  2672. if (!udc->udca_v_base) {
  2673. dev_err(udc->dev, "error getting UDCA region\n");
  2674. retval = -ENOMEM;
  2675. goto i2c_fail;
  2676. }
  2677. udc->udca_p_base = dma_handle;
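/* The UDCA is a table of DMA descriptor pointers, one entry per
 * physical endpoint, which the controller walks whenever endpoint DMA
 * is enabled; it must therefore stay coherently mapped for the device */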
  2678. dev_dbg(udc->dev, "DMA buffer(0x%x bytes), P:0x%08x, V:0x%p\n",
  2679. UDCA_BUFF_SIZE, udc->udca_p_base, udc->udca_v_base);
  2680. /* Setup the DD DMA memory pool */
  2681. udc->dd_cache = dma_pool_create("udc_dd", udc->dev,
  2682. sizeof(struct lpc32xx_usbd_dd_gad),
  2683. sizeof(u32), 0);
  2684. if (!udc->dd_cache) {
  2685. dev_err(udc->dev, "error getting DD DMA region\n");
  2686. retval = -ENOMEM;
  2687. goto dma_alloc_fail;
  2688. }
  2689. /* Clear USB peripheral and initialize gadget endpoints */
  2690. udc_disable(udc);
  2691. udc_reinit(udc);
  2692. /* Request IRQs - low and high priority USB device IRQs are routed to
  2693. * the same handler, while the DMA interrupt is routed elsewhere */
  2694. retval = request_irq(udc->udp_irq[IRQ_USB_LP], lpc32xx_usb_lp_irq,
  2695. 0, "udc_lp", udc);
  2696. if (retval < 0) {
  2697. dev_err(udc->dev, "LP request irq %d failed\n",
  2698. udc->udp_irq[IRQ_USB_LP]);
  2699. goto irq_lp_fail;
  2700. }
  2701. retval = request_irq(udc->udp_irq[IRQ_USB_HP], lpc32xx_usb_hp_irq,
  2702. 0, "udc_hp", udc);
  2703. if (retval < 0) {
  2704. dev_err(udc->dev, "HP request irq %d failed\n",
  2705. udc->udp_irq[IRQ_USB_HP]);
  2706. goto irq_hp_fail;
  2707. }
  2708. retval = request_irq(udc->udp_irq[IRQ_USB_DEVDMA],
  2709. lpc32xx_usb_devdma_irq, 0, "udc_dma", udc);
  2710. if (retval < 0) {
  2711. dev_err(udc->dev, "DEV request irq %d failed\n",
  2712. udc->udp_irq[IRQ_USB_DEVDMA]);
  2713. goto irq_dev_fail;
  2714. }
  2715. /* The transceiver interrupt is used for VBUS detection and will
  2716. kick off the VBUS handler function */
  2717. retval = request_irq(udc->udp_irq[IRQ_USB_ATX], lpc32xx_usb_vbus_irq,
  2718. 0, "udc_otg", udc);
  2719. if (retval < 0) {
  2720. dev_err(udc->dev, "VBUS request irq %d failed\n",
  2721. udc->udp_irq[IRQ_USB_ATX]);
  2722. goto irq_xcvr_fail;
  2723. }
  2724. /* Initialize wait queue */
  2725. init_waitqueue_head(&udc->ep_disable_wait_queue);
  2726. atomic_set(&udc->enabled_ep_cnt, 0);
  2727. /* Keep all IRQs disabled until GadgetFS starts up */
  2728. for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
  2729. disable_irq(udc->udp_irq[i]);
  2730. retval = usb_add_gadget_udc(dev, &udc->gadget);
  2731. if (retval < 0)
  2732. goto add_gadget_fail;
  2733. dev_set_drvdata(dev, udc);
  2734. device_init_wakeup(dev, 1);
  2735. create_debug_file(udc);
  2736. /* Disable clocks for now */
  2737. udc_clk_set(udc, 0);
  2738. dev_info(udc->dev, "%s version %s\n", driver_name, DRIVER_VERSION);
  2739. return 0;
  2740. add_gadget_fail:
  2741. free_irq(udc->udp_irq[IRQ_USB_ATX], udc);
  2742. irq_xcvr_fail:
  2743. free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc);
  2744. irq_dev_fail:
  2745. free_irq(udc->udp_irq[IRQ_USB_HP], udc);
  2746. irq_hp_fail:
  2747. free_irq(udc->udp_irq[IRQ_USB_LP], udc);
  2748. irq_lp_fail:
  2749. dma_pool_destroy(udc->dd_cache);
  2750. dma_alloc_fail:
  2751. dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
  2752. udc->udca_v_base, udc->udca_p_base);
  2753. i2c_fail:
  2754. clk_disable(udc->usb_otg_clk);
  2755. usb_otg_clk_enable_fail:
  2756. clk_disable(udc->usb_slv_clk);
  2757. usb_clk_enable_fail:
  2758. pll_set_fail:
  2759. clk_disable(udc->usb_pll_clk);
  2760. pll_enable_fail:
  2761. clk_put(udc->usb_otg_clk);
  2762. usb_otg_clk_get_fail:
  2763. clk_put(udc->usb_slv_clk);
  2764. usb_clk_get_fail:
  2765. clk_put(udc->usb_pll_clk);
  2766. pll_get_fail:
  2767. iounmap(udc->udp_baseaddr);
  2768. io_map_fail:
  2769. release_mem_region(udc->io_p_start, udc->io_p_size);
  2770. dev_err(udc->dev, "%s probe failed, %d\n", driver_name, retval);
  2771. request_mem_region_fail:
  2772. irq_fail:
  2773. resource_fail:
  2774. phy_fail:
  2775. kfree(udc);
  2776. return retval;
  2777. }
  2778. static int lpc32xx_udc_remove(struct platform_device *pdev)
  2779. {
  2780. struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
  2781. usb_del_gadget_udc(&udc->gadget);
  2782. if (udc->driver)
  2783. return -EBUSY;
  2784. udc_clk_set(udc, 1);
  2785. udc_disable(udc);
  2786. pullup(udc, 0);
  2787. free_irq(udc->udp_irq[IRQ_USB_ATX], udc);
  2788. device_init_wakeup(&pdev->dev, 0);
  2789. remove_debug_file(udc);
  2790. dma_pool_destroy(udc->dd_cache);
  2791. dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
  2792. udc->udca_v_base, udc->udca_p_base);
  2793. free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc);
  2794. free_irq(udc->udp_irq[IRQ_USB_HP], udc);
  2795. free_irq(udc->udp_irq[IRQ_USB_LP], udc);
  2796. clk_disable(udc->usb_otg_clk);
  2797. clk_put(udc->usb_otg_clk);
  2798. clk_disable(udc->usb_slv_clk);
  2799. clk_put(udc->usb_slv_clk);
  2800. clk_disable(udc->usb_pll_clk);
  2801. clk_put(udc->usb_pll_clk);
  2802. iounmap(udc->udp_baseaddr);
  2803. release_mem_region(udc->io_p_start, udc->io_p_size);
  2804. kfree(udc);
  2805. return 0;
  2806. }
  2807. #ifdef CONFIG_PM
  2808. static int lpc32xx_udc_suspend(struct platform_device *pdev, pm_message_t mesg)
  2809. {
  2810. struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
  2811. if (udc->clocked) {
  2812. /* Power down ISP */
  2813. udc->poweron = 0;
  2814. isp1301_set_powerstate(udc, 0);
  2815. /* Disable clocking */
  2816. udc_clk_set(udc, 0);
  2817. /* Keep clock flag on, so we know to re-enable clocks
  2818. on resume */
  2819. udc->clocked = 1;
  2820. /* Kill global USB clock */
  2821. clk_disable(udc->usb_slv_clk);
  2822. }
  2823. return 0;
  2824. }
  2825. static int lpc32xx_udc_resume(struct platform_device *pdev)
  2826. {
  2827. struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
  2828. if (udc->clocked) {
  2829. /* Enable global USB clock */
  2830. clk_enable(udc->usb_slv_clk);
  2831. /* Enable clocking */
  2832. udc_clk_set(udc, 1);
  2833. /* ISP back to normal power mode */
  2834. udc->poweron = 1;
  2835. isp1301_set_powerstate(udc, 1);
  2836. }
  2837. return 0;
  2838. }
  2839. #else
  2840. #define lpc32xx_udc_suspend NULL
  2841. #define lpc32xx_udc_resume NULL
  2842. #endif
  2843. #ifdef CONFIG_OF
  2844. static const struct of_device_id lpc32xx_udc_of_match[] = {
  2845. { .compatible = "nxp,lpc3220-udc", },
  2846. { },
  2847. };
  2848. MODULE_DEVICE_TABLE(of, lpc32xx_udc_of_match);
  2849. #endif
  2850. static struct platform_driver lpc32xx_udc_driver = {
  2851. .remove = lpc32xx_udc_remove,
  2852. .shutdown = lpc32xx_udc_shutdown,
  2853. .suspend = lpc32xx_udc_suspend,
  2854. .resume = lpc32xx_udc_resume,
  2855. .driver = {
  2856. .name = (char *) driver_name,
  2857. .of_match_table = of_match_ptr(lpc32xx_udc_of_match),
  2858. },
  2859. };
  2860. module_platform_driver_probe(lpc32xx_udc_driver, lpc32xx_udc_probe);
  2861. MODULE_DESCRIPTION("LPC32XX udc driver");
  2862. MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
  2863. MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
  2864. MODULE_LICENSE("GPL");
  2865. MODULE_ALIAS("platform:lpc32xx_udc");