/* musb_core.c — stray line-number index from page extraction removed */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * MUSB OTG driver core code
  4. *
  5. * Copyright 2005 Mentor Graphics Corporation
  6. * Copyright (C) 2005-2006 by Texas Instruments
  7. * Copyright (C) 2006-2007 Nokia Corporation
  8. */
  9. /*
  10. * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
  11. *
  12. * This consists of a Host Controller Driver (HCD) and a peripheral
  13. * controller driver implementing the "Gadget" API; OTG support is
  14. * in the works. These are normal Linux-USB controller drivers which
  15. * use IRQs and have no dedicated thread.
  16. *
  17. * This version of the driver has only been used with products from
  18. * Texas Instruments. Those products integrate the Inventra logic
  19. * with other DMA, IRQ, and bus modules, as well as other logic that
  20. * needs to be reflected in this driver.
  21. *
  22. *
  23. * NOTE: the original Mentor code here was pretty much a collection
  24. * of mechanisms that don't seem to have been fully integrated/working
  25. * for *any* Linux kernel version. This version aims at Linux 2.6.now,
  26. * Key open issues include:
  27. *
  28. * - Lack of host-side transaction scheduling, for all transfer types.
  29. * The hardware doesn't do it; instead, software must.
  30. *
  31. * This is not an issue for OTG devices that don't support external
  32. * hubs, but for more "normal" USB hosts it's a user issue that the
  33. * "multipoint" support doesn't scale in the expected ways. That
  34. * includes DaVinci EVM in a common non-OTG mode.
  35. *
  36. * * Control and bulk use dedicated endpoints, and there's as
  37. * yet no mechanism to either (a) reclaim the hardware when
  38. * peripherals are NAKing, which gets complicated with bulk
  39. * endpoints, or (b) use more than a single bulk endpoint in
  40. * each direction.
  41. *
  42. * RESULT: one device may be perceived as blocking another one.
  43. *
  44. * * Interrupt and isochronous will dynamically allocate endpoint
  45. * hardware, but (a) there's no record keeping for bandwidth;
  46. * (b) in the common case that few endpoints are available, there
  47. * is no mechanism to reuse endpoints to talk to multiple devices.
  48. *
  49. * RESULT: At one extreme, bandwidth can be overcommitted in
  50. * some hardware configurations, no faults will be reported.
  51. * At the other extreme, the bandwidth capabilities which do
  52. * exist tend to be severely undercommitted. You can't yet hook
  53. * up both a keyboard and a mouse to an external USB hub.
  54. */
  55. /*
  56. * This gets many kinds of configuration information:
  57. * - Kconfig for everything user-configurable
  58. * - platform_device for addressing, irq, and platform_data
  59. * - platform_data is mostly for board-specific information
 * (plus recently, SOC or family details)
  61. *
  62. * Most of the conditional compilation will (someday) vanish.
  63. */
  64. #include <linux/module.h>
  65. #include <linux/kernel.h>
  66. #include <linux/sched.h>
  67. #include <linux/slab.h>
  68. #include <linux/list.h>
  69. #include <linux/kobject.h>
  70. #include <linux/prefetch.h>
  71. #include <linux/platform_device.h>
  72. #include <linux/io.h>
  73. #include <linux/dma-mapping.h>
  74. #include <linux/usb.h>
  75. #include <linux/usb/of.h>
  76. #include "musb_core.h"
  77. #include "musb_trace.h"
  78. #define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
  79. #define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
  80. #define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
  81. #define MUSB_VERSION "6.0"
  82. #define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
  83. #define MUSB_DRIVER_NAME "musb-hdrc"
  84. const char musb_driver_name[] = MUSB_DRIVER_NAME;
  85. MODULE_DESCRIPTION(DRIVER_INFO);
  86. MODULE_AUTHOR(DRIVER_AUTHOR);
  87. MODULE_LICENSE("GPL");
  88. MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
  89. /*-------------------------------------------------------------------------*/
/* Fetch the struct musb instance stashed in the device's drvdata. */
static inline struct musb *dev_to_musb(struct device *dev)
{
	return dev_get_drvdata(dev);
}
  94. enum musb_mode musb_get_mode(struct device *dev)
  95. {
  96. enum usb_dr_mode mode;
  97. mode = usb_get_dr_mode(dev);
  98. switch (mode) {
  99. case USB_DR_MODE_HOST:
  100. return MUSB_HOST;
  101. case USB_DR_MODE_PERIPHERAL:
  102. return MUSB_PERIPHERAL;
  103. case USB_DR_MODE_OTG:
  104. case USB_DR_MODE_UNKNOWN:
  105. default:
  106. return MUSB_OTG;
  107. }
  108. }
  109. EXPORT_SYMBOL_GPL(musb_get_mode);
  110. /*-------------------------------------------------------------------------*/
  111. #ifndef CONFIG_BLACKFIN
/*
 * musb_ulpi_read - read one ULPI PHY register through the MUSB
 * ULPI register window.
 * @phy: transceiver; @phy->io_priv is the MUSB register base
 * @reg: ULPI register address (truncated to the low 8 bits)
 *
 * Returns the register value on success, or -ETIMEDOUT when the
 * controller never raises MUSB_ULPI_REG_CMPLT within 10000 polls.
 */
static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
{
	void __iomem *addr = phy->io_priv;
	int	i = 0;
	u8	r;
	u8	power;
	int	ret;

	pm_runtime_get_sync(phy->io_dev);

	/* Make sure the transceiver is not in low power mode */
	power = musb_readb(addr, MUSB_POWER);
	power &= ~MUSB_POWER_SUSPENDM;
	musb_writeb(addr, MUSB_POWER, power);

	/* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
	 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
	 */
	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
	/* REG_REQ starts the access; RDN_WR selects a read cycle */
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
			MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);

	/* busy-wait for completion, bounded to 10000 polls */
	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
				& MUSB_ULPI_REG_CMPLT)) {
		i++;
		if (i == 10000) {
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* acknowledge completion by clearing the CMPLT bit */
	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
	r &= ~MUSB_ULPI_REG_CMPLT;
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
	ret = musb_readb(addr, MUSB_ULPI_REG_DATA);

out:
	pm_runtime_put(phy->io_dev);

	return ret;
}
/*
 * musb_ulpi_write - write one ULPI PHY register through the MUSB
 * ULPI register window.
 * @phy: transceiver; @phy->io_priv is the MUSB register base
 * @val: value to write (truncated to the low 8 bits)
 * @reg: ULPI register address (truncated to the low 8 bits)
 *
 * Returns 0 on success, or -ETIMEDOUT when the controller never
 * raises MUSB_ULPI_REG_CMPLT within 10000 polls.
 */
static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
{
	void __iomem *addr = phy->io_priv;
	int	i = 0;
	u8	r = 0;
	u8	power;
	int	ret = 0;

	pm_runtime_get_sync(phy->io_dev);

	/* Make sure the transceiver is not in low power mode */
	power = musb_readb(addr, MUSB_POWER);
	power &= ~MUSB_POWER_SUSPENDM;
	musb_writeb(addr, MUSB_POWER, power);

	/* latch address and data, then start a write cycle (no RDN_WR) */
	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);

	/* busy-wait for completion, bounded to 10000 polls */
	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
				& MUSB_ULPI_REG_CMPLT)) {
		i++;
		if (i == 10000) {
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* acknowledge completion by clearing the CMPLT bit */
	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
	r &= ~MUSB_ULPI_REG_CMPLT;
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);

out:
	pm_runtime_put(phy->io_dev);

	return ret;
}
  176. #else
  177. #define musb_ulpi_read NULL
  178. #define musb_ulpi_write NULL
  179. #endif
/* PHY I/O ops handed to the generic USB PHY layer; both hooks are
 * NULL when CONFIG_BLACKFIN is set (see the #ifdef block above). */
static struct usb_phy_io_ops musb_ulpi_access = {
	.read = musb_ulpi_read,
	.write = musb_ulpi_write,
};
  184. /*-------------------------------------------------------------------------*/
  185. static u32 musb_default_fifo_offset(u8 epnum)
  186. {
  187. return 0x20 + (epnum * 4);
  188. }
/* "flat" mapping: each endpoint has its own i/o address */
static void musb_flat_ep_select(void __iomem *mbase, u8 epnum)
{
	/* intentionally empty: no INDEX register is needed in flat mode */
}
  193. static u32 musb_flat_ep_offset(u8 epnum, u16 offset)
  194. {
  195. return 0x100 + (0x10 * epnum) + offset;
  196. }
/* "indexed" mapping: INDEX register controls register bank select */
static void musb_indexed_ep_select(void __iomem *mbase, u8 epnum)
{
	/* point the shared register bank at this endpoint */
	musb_writeb(mbase, MUSB_INDEX, epnum);
}
  202. static u32 musb_indexed_ep_offset(u8 epnum, u16 offset)
  203. {
  204. return 0x10 + offset;
  205. }
  206. static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
  207. {
  208. return 0x80 + (0x08 * epnum) + offset;
  209. }
/* Default MMIO byte read: raw access plus a tracepoint for debugging. */
static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
{
	u8 data = __raw_readb(addr + offset);

	trace_musb_readb(__builtin_return_address(0), addr, offset, data);
	return data;
}
/* Default MMIO byte write: trace first, then the raw access. */
static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
{
	trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
	__raw_writeb(data, addr + offset);
}
/* Default MMIO 16-bit read: raw access plus a tracepoint. */
static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
{
	u16 data = __raw_readw(addr + offset);

	trace_musb_readw(__builtin_return_address(0), addr, offset, data);
	return data;
}
/* Default MMIO 16-bit write: trace first, then the raw access. */
static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
{
	trace_musb_writew(__builtin_return_address(0), addr, offset, data);
	__raw_writew(data, addr + offset);
}
/* Default MMIO 32-bit read: raw access plus a tracepoint. */
static u32 musb_default_readl(const void __iomem *addr, unsigned offset)
{
	u32 data = __raw_readl(addr + offset);

	trace_musb_readl(__builtin_return_address(0), addr, offset, data);
	return data;
}
/* Default MMIO 32-bit write: trace first, then the raw access. */
static void musb_default_writel(void __iomem *addr, unsigned offset, u32 data)
{
	trace_musb_writel(__builtin_return_address(0), addr, offset, data);
	__raw_writel(data, addr + offset);
}
/*
 * Load an endpoint's FIFO
 *
 * Copies @len bytes from @src into @hw_ep's FIFO register, using the
 * widest MMIO access the source alignment allows: 32-bit bursts for a
 * 4-byte-aligned buffer, 16-bit bursts when only 2-byte aligned, and
 * bytewise otherwise; narrower accesses mop up any tail bytes.
 */
static void musb_default_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
				    const u8 *src)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *fifo = hw_ep->fifo;

	if (unlikely(len == 0))
		return;

	prefetch((u8 *)src);

	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
			'T', hw_ep->epnum, fifo, len, src);

	/* we can't assume unaligned reads work */
	if (likely((0x01 & (unsigned long) src) == 0)) {
		u16	index = 0;

		/* best case is 32bit-aligned source address */
		if ((0x02 & (unsigned long) src) == 0) {
			if (len >= 4) {
				iowrite32_rep(fifo, src + index, len >> 2);
				index += len & ~0x03;
			}
			if (len & 0x02) {
				__raw_writew(*(u16 *)&src[index], fifo);
				index += 2;
			}
		} else {
			if (len >= 2) {
				iowrite16_rep(fifo, src + index, len >> 1);
				index += len & ~0x01;
			}
		}
		/* odd trailing byte, if any */
		if (len & 0x01)
			__raw_writeb(src[index], fifo);
	} else {
		/* byte aligned */
		iowrite8_rep(fifo, src, len);
	}
}
/*
 * Unload an endpoint's FIFO
 *
 * Copies @len bytes from @hw_ep's FIFO register into @dst, mirroring
 * musb_default_write_fifo(): 32-bit bursts for a 4-byte-aligned
 * destination, 16-bit when only 2-byte aligned, bytewise otherwise,
 * with narrower accesses for the tail bytes.
 */
static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *fifo = hw_ep->fifo;

	if (unlikely(len == 0))
		return;

	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
			'R', hw_ep->epnum, fifo, len, dst);

	/* we can't assume unaligned writes work */
	if (likely((0x01 & (unsigned long) dst) == 0)) {
		u16	index = 0;

		/* best case is 32bit-aligned destination address */
		if ((0x02 & (unsigned long) dst) == 0) {
			if (len >= 4) {
				ioread32_rep(fifo, dst, len >> 2);
				index = len & ~0x03;
			}
			if (len & 0x02) {
				*(u16 *)&dst[index] = __raw_readw(fifo);
				index += 2;
			}
		} else {
			if (len >= 2) {
				ioread16_rep(fifo, dst, len >> 1);
				index = len & ~0x01;
			}
		}
		/* odd trailing byte, if any */
		if (len & 0x01)
			dst[index] = __raw_readb(fifo);
	} else {
		/* byte aligned */
		ioread8_rep(fifo, dst, len);
	}
}
/*
 * Old style IO functions
 *
 * Global register-access function pointers; presumably assigned by the
 * platform glue before any hardware access so per-SoC bus quirks can be
 * handled -- NOTE(review): the assignment happens outside this chunk,
 * confirm against the controller init / glue drivers.
 */
u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
EXPORT_SYMBOL_GPL(musb_readb);

void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
EXPORT_SYMBOL_GPL(musb_writeb);

u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
EXPORT_SYMBOL_GPL(musb_readw);

void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
EXPORT_SYMBOL_GPL(musb_writew);

u32 (*musb_readl)(const void __iomem *addr, unsigned offset);
EXPORT_SYMBOL_GPL(musb_readl);

void (*musb_writel)(void __iomem *addr, unsigned offset, u32 data);
EXPORT_SYMBOL_GPL(musb_writel);

#ifndef CONFIG_MUSB_PIO_ONLY
/* DMA controller factory/teardown hooks, likewise set elsewhere */
struct dma_controller *
	(*musb_dma_controller_create)(struct musb *musb, void __iomem *base);
EXPORT_SYMBOL(musb_dma_controller_create);

void (*musb_dma_controller_destroy)(struct dma_controller *c);
EXPORT_SYMBOL(musb_dma_controller_destroy);
#endif
  341. /*
  342. * New style IO functions
  343. */
  344. void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
  345. {
  346. return hw_ep->musb->io.read_fifo(hw_ep, len, dst);
  347. }
  348. void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
  349. {
  350. return hw_ep->musb->io.write_fifo(hw_ep, len, src);
  351. }
  352. /*-------------------------------------------------------------------------*/
/* for high speed test mode; see USB 2.0 spec 7.1.20 */
/* 53-byte TEST_PACKET payload, sent verbatim by musb_load_testpacket() */
static const u8 musb_test_packet[53] = {
	/* implicit SYNC then DATA0 to start */

	/* JKJKJKJK x9 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* JJKKJJKK x8 */
	0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
	/* JJJJKKKK x8 */
	0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
	/* JJJJJJJKKKKKKK x8 */
	0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	/* JJJJJJJK x8 */
	0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
	/* JKKKKKKK x10, JK */
	0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e

	/* implicit CRC16 then EOP to end */
};
/* Write the USB 2.0 test packet into the ep0 FIFO and set TXPKTRDY so
 * the controller transmits it (TEST_PACKET test-mode support). */
void musb_load_testpacket(struct musb *musb)
{
	void __iomem	*regs = musb->endpoints[0].regs;

	musb_ep_select(musb->mregs, 0);
	musb_write_fifo(musb->control_ep,
			sizeof(musb_test_packet), musb_test_packet);
	musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
}
  378. /*-------------------------------------------------------------------------*/
/*
 * Handles OTG hnp timeouts, such as b_ase0_brst
 *
 * Timer callback for musb->otg_timer; takes the musb lock while it
 * inspects and updates the OTG state machine.
 */
static void musb_otg_timer_func(struct timer_list *t)
{
	struct musb	*musb = from_timer(musb, t, otg_timer);
	unsigned long	flags;

	spin_lock_irqsave(&musb->lock, flags);
	switch (musb->xceiv->otg->state) {
	case OTG_STATE_B_WAIT_ACON:
		/* the A-device never connected as host in time: drop the
		 * gadget connection and become a plain B peripheral */
		musb_dbg(musb,
			"HNP: b_wait_acon timeout; back to b_peripheral");
		musb_g_disconnect(musb);
		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		musb->is_active = 0;
		break;
	case OTG_STATE_A_SUSPEND:
	case OTG_STATE_A_WAIT_BCON:
		/* A-side timeout: turn VBUS off and head for A_WAIT_VFALL */
		musb_dbg(musb, "HNP: %s timeout",
			usb_otg_state_string(musb->xceiv->otg->state));
		musb_platform_set_vbus(musb, 0);
		musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
		break;
	default:
		musb_dbg(musb, "HNP: Unhandled mode %s",
			usb_otg_state_string(musb->xceiv->otg->state));
	}
	spin_unlock_irqrestore(&musb->lock, flags);
}
/*
 * Stops the HNP transition. Caller must take care of locking.
 *
 * From A_PERIPHERAL this reverts through a gadget disconnect; from
 * B_HOST it clears is_b_host, drops back to B_PERIPHERAL, and suspends
 * the bus via MUSB_POWER_SUSPENDM. In either case the connect-change
 * bit in port1_status is cleared afterwards (see comment below).
 */
void musb_hnp_stop(struct musb *musb)
{
	struct usb_hcd	*hcd = musb->hcd;
	void __iomem	*mbase = musb->mregs;
	u8	reg;

	musb_dbg(musb, "HNP: stop from %s",
			usb_otg_state_string(musb->xceiv->otg->state));

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_A_PERIPHERAL:
		musb_g_disconnect(musb);
		musb_dbg(musb, "HNP: back to %s",
			usb_otg_state_string(musb->xceiv->otg->state));
		break;
	case OTG_STATE_B_HOST:
		musb_dbg(musb, "HNP: Disabling HR");
		if (hcd)
			hcd->self.is_b_host = 0;
		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		MUSB_DEV_MODE(musb);
		/* suspend the bus while falling back to peripheral mode */
		reg = musb_readb(mbase, MUSB_POWER);
		reg |= MUSB_POWER_SUSPENDM;
		musb_writeb(mbase, MUSB_POWER, reg);
		/* REVISIT: Start SESSION_REQUEST here? */
		break;
	default:
		musb_dbg(musb, "HNP: Stopping in unknown state %s",
			usb_otg_state_string(musb->xceiv->otg->state));
	}

	/*
	 * When returning to A state after HNP, avoid hub_port_rebounce(),
	 * which causes occasional OPT A "Did not receive reset after connect"
	 * errors.
	 */
	musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
}
  446. static void musb_recover_from_babble(struct musb *musb);
  447. /*
  448. * Interrupt Service Routine to record USB "global" interrupts.
  449. * Since these do not happen often and signify things of
  450. * paramount importance, it seems OK to check them individually;
  451. * the order of the tests is specified in the manual
  452. *
  453. * @param musb instance pointer
  454. * @param int_usb register contents
  455. * @param devctl
  456. * @param power
  457. */
  458. static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
  459. u8 devctl)
  460. {
  461. irqreturn_t handled = IRQ_NONE;
  462. musb_dbg(musb, "<== DevCtl=%02x, int_usb=0x%x", devctl, int_usb);
  463. /* in host mode, the peripheral may issue remote wakeup.
  464. * in peripheral mode, the host may resume the link.
  465. * spurious RESUME irqs happen too, paired with SUSPEND.
  466. */
  467. if (int_usb & MUSB_INTR_RESUME) {
  468. handled = IRQ_HANDLED;
  469. musb_dbg(musb, "RESUME (%s)",
  470. usb_otg_state_string(musb->xceiv->otg->state));
  471. if (devctl & MUSB_DEVCTL_HM) {
  472. switch (musb->xceiv->otg->state) {
  473. case OTG_STATE_A_SUSPEND:
  474. /* remote wakeup? */
  475. musb->port1_status |=
  476. (USB_PORT_STAT_C_SUSPEND << 16)
  477. | MUSB_PORT_STAT_RESUME;
  478. musb->rh_timer = jiffies
  479. + msecs_to_jiffies(USB_RESUME_TIMEOUT);
  480. musb->xceiv->otg->state = OTG_STATE_A_HOST;
  481. musb->is_active = 1;
  482. musb_host_resume_root_hub(musb);
  483. schedule_delayed_work(&musb->finish_resume_work,
  484. msecs_to_jiffies(USB_RESUME_TIMEOUT));
  485. break;
  486. case OTG_STATE_B_WAIT_ACON:
  487. musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
  488. musb->is_active = 1;
  489. MUSB_DEV_MODE(musb);
  490. break;
  491. default:
  492. WARNING("bogus %s RESUME (%s)\n",
  493. "host",
  494. usb_otg_state_string(musb->xceiv->otg->state));
  495. }
  496. } else {
  497. switch (musb->xceiv->otg->state) {
  498. case OTG_STATE_A_SUSPEND:
  499. /* possibly DISCONNECT is upcoming */
  500. musb->xceiv->otg->state = OTG_STATE_A_HOST;
  501. musb_host_resume_root_hub(musb);
  502. break;
  503. case OTG_STATE_B_WAIT_ACON:
  504. case OTG_STATE_B_PERIPHERAL:
  505. /* disconnect while suspended? we may
  506. * not get a disconnect irq...
  507. */
  508. if ((devctl & MUSB_DEVCTL_VBUS)
  509. != (3 << MUSB_DEVCTL_VBUS_SHIFT)
  510. ) {
  511. musb->int_usb |= MUSB_INTR_DISCONNECT;
  512. musb->int_usb &= ~MUSB_INTR_SUSPEND;
  513. break;
  514. }
  515. musb_g_resume(musb);
  516. break;
  517. case OTG_STATE_B_IDLE:
  518. musb->int_usb &= ~MUSB_INTR_SUSPEND;
  519. break;
  520. default:
  521. WARNING("bogus %s RESUME (%s)\n",
  522. "peripheral",
  523. usb_otg_state_string(musb->xceiv->otg->state));
  524. }
  525. }
  526. }
  527. /* see manual for the order of the tests */
  528. if (int_usb & MUSB_INTR_SESSREQ) {
  529. void __iomem *mbase = musb->mregs;
  530. if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
  531. && (devctl & MUSB_DEVCTL_BDEVICE)) {
  532. musb_dbg(musb, "SessReq while on B state");
  533. return IRQ_HANDLED;
  534. }
  535. musb_dbg(musb, "SESSION_REQUEST (%s)",
  536. usb_otg_state_string(musb->xceiv->otg->state));
  537. /* IRQ arrives from ID pin sense or (later, if VBUS power
  538. * is removed) SRP. responses are time critical:
  539. * - turn on VBUS (with silicon-specific mechanism)
  540. * - go through A_WAIT_VRISE
  541. * - ... to A_WAIT_BCON.
  542. * a_wait_vrise_tmout triggers VBUS_ERROR transitions
  543. */
  544. musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
  545. musb->ep0_stage = MUSB_EP0_START;
  546. musb->xceiv->otg->state = OTG_STATE_A_IDLE;
  547. MUSB_HST_MODE(musb);
  548. musb_platform_set_vbus(musb, 1);
  549. handled = IRQ_HANDLED;
  550. }
  551. if (int_usb & MUSB_INTR_VBUSERROR) {
  552. int ignore = 0;
  553. /* During connection as an A-Device, we may see a short
  554. * current spikes causing voltage drop, because of cable
  555. * and peripheral capacitance combined with vbus draw.
  556. * (So: less common with truly self-powered devices, where
  557. * vbus doesn't act like a power supply.)
  558. *
  559. * Such spikes are short; usually less than ~500 usec, max
  560. * of ~2 msec. That is, they're not sustained overcurrent
  561. * errors, though they're reported using VBUSERROR irqs.
  562. *
  563. * Workarounds: (a) hardware: use self powered devices.
  564. * (b) software: ignore non-repeated VBUS errors.
  565. *
  566. * REVISIT: do delays from lots of DEBUG_KERNEL checks
  567. * make trouble here, keeping VBUS < 4.4V ?
  568. */
  569. switch (musb->xceiv->otg->state) {
  570. case OTG_STATE_A_HOST:
  571. /* recovery is dicey once we've gotten past the
  572. * initial stages of enumeration, but if VBUS
  573. * stayed ok at the other end of the link, and
  574. * another reset is due (at least for high speed,
  575. * to redo the chirp etc), it might work OK...
  576. */
  577. case OTG_STATE_A_WAIT_BCON:
  578. case OTG_STATE_A_WAIT_VRISE:
  579. if (musb->vbuserr_retry) {
  580. void __iomem *mbase = musb->mregs;
  581. musb->vbuserr_retry--;
  582. ignore = 1;
  583. devctl |= MUSB_DEVCTL_SESSION;
  584. musb_writeb(mbase, MUSB_DEVCTL, devctl);
  585. } else {
  586. musb->port1_status |=
  587. USB_PORT_STAT_OVERCURRENT
  588. | (USB_PORT_STAT_C_OVERCURRENT << 16);
  589. }
  590. break;
  591. default:
  592. break;
  593. }
  594. dev_printk(ignore ? KERN_DEBUG : KERN_ERR, musb->controller,
  595. "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
  596. usb_otg_state_string(musb->xceiv->otg->state),
  597. devctl,
  598. ({ char *s;
  599. switch (devctl & MUSB_DEVCTL_VBUS) {
  600. case 0 << MUSB_DEVCTL_VBUS_SHIFT:
  601. s = "<SessEnd"; break;
  602. case 1 << MUSB_DEVCTL_VBUS_SHIFT:
  603. s = "<AValid"; break;
  604. case 2 << MUSB_DEVCTL_VBUS_SHIFT:
  605. s = "<VBusValid"; break;
  606. /* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
  607. default:
  608. s = "VALID"; break;
  609. } s; }),
  610. VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
  611. musb->port1_status);
  612. /* go through A_WAIT_VFALL then start a new session */
  613. if (!ignore)
  614. musb_platform_set_vbus(musb, 0);
  615. handled = IRQ_HANDLED;
  616. }
  617. if (int_usb & MUSB_INTR_SUSPEND) {
  618. musb_dbg(musb, "SUSPEND (%s) devctl %02x",
  619. usb_otg_state_string(musb->xceiv->otg->state), devctl);
  620. handled = IRQ_HANDLED;
  621. switch (musb->xceiv->otg->state) {
  622. case OTG_STATE_A_PERIPHERAL:
  623. /* We also come here if the cable is removed, since
  624. * this silicon doesn't report ID-no-longer-grounded.
  625. *
  626. * We depend on T(a_wait_bcon) to shut us down, and
  627. * hope users don't do anything dicey during this
  628. * undesired detour through A_WAIT_BCON.
  629. */
  630. musb_hnp_stop(musb);
  631. musb_host_resume_root_hub(musb);
  632. musb_root_disconnect(musb);
  633. musb_platform_try_idle(musb, jiffies
  634. + msecs_to_jiffies(musb->a_wait_bcon
  635. ? : OTG_TIME_A_WAIT_BCON));
  636. break;
  637. case OTG_STATE_B_IDLE:
  638. if (!musb->is_active)
  639. break;
  640. /* fall through */
  641. case OTG_STATE_B_PERIPHERAL:
  642. musb_g_suspend(musb);
  643. musb->is_active = musb->g.b_hnp_enable;
  644. if (musb->is_active) {
  645. musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
  646. musb_dbg(musb, "HNP: Setting timer for b_ase0_brst");
  647. mod_timer(&musb->otg_timer, jiffies
  648. + msecs_to_jiffies(
  649. OTG_TIME_B_ASE0_BRST));
  650. }
  651. break;
  652. case OTG_STATE_A_WAIT_BCON:
  653. if (musb->a_wait_bcon != 0)
  654. musb_platform_try_idle(musb, jiffies
  655. + msecs_to_jiffies(musb->a_wait_bcon));
  656. break;
  657. case OTG_STATE_A_HOST:
  658. musb->xceiv->otg->state = OTG_STATE_A_SUSPEND;
  659. musb->is_active = musb->hcd->self.b_hnp_enable;
  660. break;
  661. case OTG_STATE_B_HOST:
  662. /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
  663. musb_dbg(musb, "REVISIT: SUSPEND as B_HOST");
  664. break;
  665. default:
  666. /* "should not happen" */
  667. musb->is_active = 0;
  668. break;
  669. }
  670. }
  671. if (int_usb & MUSB_INTR_CONNECT) {
  672. struct usb_hcd *hcd = musb->hcd;
  673. handled = IRQ_HANDLED;
  674. musb->is_active = 1;
  675. musb->ep0_stage = MUSB_EP0_START;
  676. musb->intrtxe = musb->epmask;
  677. musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
  678. musb->intrrxe = musb->epmask & 0xfffe;
  679. musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
  680. musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
  681. musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
  682. |USB_PORT_STAT_HIGH_SPEED
  683. |USB_PORT_STAT_ENABLE
  684. );
  685. musb->port1_status |= USB_PORT_STAT_CONNECTION
  686. |(USB_PORT_STAT_C_CONNECTION << 16);
  687. /* high vs full speed is just a guess until after reset */
  688. if (devctl & MUSB_DEVCTL_LSDEV)
  689. musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
  690. /* indicate new connection to OTG machine */
  691. switch (musb->xceiv->otg->state) {
  692. case OTG_STATE_B_PERIPHERAL:
  693. if (int_usb & MUSB_INTR_SUSPEND) {
  694. musb_dbg(musb, "HNP: SUSPEND+CONNECT, now b_host");
  695. int_usb &= ~MUSB_INTR_SUSPEND;
  696. goto b_host;
  697. } else
  698. musb_dbg(musb, "CONNECT as b_peripheral???");
  699. break;
  700. case OTG_STATE_B_WAIT_ACON:
  701. musb_dbg(musb, "HNP: CONNECT, now b_host");
  702. b_host:
  703. musb->xceiv->otg->state = OTG_STATE_B_HOST;
  704. if (musb->hcd)
  705. musb->hcd->self.is_b_host = 1;
  706. del_timer(&musb->otg_timer);
  707. break;
  708. default:
  709. if ((devctl & MUSB_DEVCTL_VBUS)
  710. == (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
  711. musb->xceiv->otg->state = OTG_STATE_A_HOST;
  712. if (hcd)
  713. hcd->self.is_b_host = 0;
  714. }
  715. break;
  716. }
  717. musb_host_poke_root_hub(musb);
  718. musb_dbg(musb, "CONNECT (%s) devctl %02x",
  719. usb_otg_state_string(musb->xceiv->otg->state), devctl);
  720. }
  721. if (int_usb & MUSB_INTR_DISCONNECT) {
  722. musb_dbg(musb, "DISCONNECT (%s) as %s, devctl %02x",
  723. usb_otg_state_string(musb->xceiv->otg->state),
  724. MUSB_MODE(musb), devctl);
  725. handled = IRQ_HANDLED;
  726. switch (musb->xceiv->otg->state) {
  727. case OTG_STATE_A_HOST:
  728. case OTG_STATE_A_SUSPEND:
  729. musb_host_resume_root_hub(musb);
  730. musb_root_disconnect(musb);
  731. if (musb->a_wait_bcon != 0)
  732. musb_platform_try_idle(musb, jiffies
  733. + msecs_to_jiffies(musb->a_wait_bcon));
  734. break;
  735. case OTG_STATE_B_HOST:
  736. /* REVISIT this behaves for "real disconnect"
 * cases; make sure the other transitions from
 * B_HOST act right too. The B_HOST code
  739. * in hnp_stop() is currently not used...
  740. */
  741. musb_root_disconnect(musb);
  742. if (musb->hcd)
  743. musb->hcd->self.is_b_host = 0;
  744. musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
  745. MUSB_DEV_MODE(musb);
  746. musb_g_disconnect(musb);
  747. break;
  748. case OTG_STATE_A_PERIPHERAL:
  749. musb_hnp_stop(musb);
  750. musb_root_disconnect(musb);
  751. /* FALLTHROUGH */
  752. case OTG_STATE_B_WAIT_ACON:
  753. /* FALLTHROUGH */
  754. case OTG_STATE_B_PERIPHERAL:
  755. case OTG_STATE_B_IDLE:
  756. musb_g_disconnect(musb);
  757. break;
  758. default:
  759. WARNING("unhandled DISCONNECT transition (%s)\n",
  760. usb_otg_state_string(musb->xceiv->otg->state));
  761. break;
  762. }
  763. }
  764. /* mentor saves a bit: bus reset and babble share the same irq.
  765. * only host sees babble; only peripheral sees bus reset.
  766. */
  767. if (int_usb & MUSB_INTR_RESET) {
  768. handled = IRQ_HANDLED;
  769. if (is_host_active(musb)) {
  770. /*
  771. * When BABBLE happens what we can depends on which
  772. * platform MUSB is running, because some platforms
  773. * implemented proprietary means for 'recovering' from
  774. * Babble conditions. One such platform is AM335x. In
  775. * most cases, however, the only thing we can do is
  776. * drop the session.
  777. */
  778. dev_err(musb->controller, "Babble\n");
  779. musb_recover_from_babble(musb);
  780. } else {
  781. musb_dbg(musb, "BUS RESET as %s",
  782. usb_otg_state_string(musb->xceiv->otg->state));
  783. switch (musb->xceiv->otg->state) {
  784. case OTG_STATE_A_SUSPEND:
  785. musb_g_reset(musb);
  786. /* FALLTHROUGH */
  787. case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
  788. /* never use invalid T(a_wait_bcon) */
  789. musb_dbg(musb, "HNP: in %s, %d msec timeout",
  790. usb_otg_state_string(musb->xceiv->otg->state),
  791. TA_WAIT_BCON(musb));
  792. mod_timer(&musb->otg_timer, jiffies
  793. + msecs_to_jiffies(TA_WAIT_BCON(musb)));
  794. break;
  795. case OTG_STATE_A_PERIPHERAL:
  796. del_timer(&musb->otg_timer);
  797. musb_g_reset(musb);
  798. break;
  799. case OTG_STATE_B_WAIT_ACON:
  800. musb_dbg(musb, "HNP: RESET (%s), to b_peripheral",
  801. usb_otg_state_string(musb->xceiv->otg->state));
  802. musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
  803. musb_g_reset(musb);
  804. break;
  805. case OTG_STATE_B_IDLE:
  806. musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
  807. /* FALLTHROUGH */
  808. case OTG_STATE_B_PERIPHERAL:
  809. musb_g_reset(musb);
  810. break;
  811. default:
  812. musb_dbg(musb, "Unhandled BUS RESET as %s",
  813. usb_otg_state_string(musb->xceiv->otg->state));
  814. }
  815. }
  816. }
  817. #if 0
  818. /* REVISIT ... this would be for multiplexing periodic endpoints, or
  819. * supporting transfer phasing to prevent exceeding ISO bandwidth
  820. * limits of a given frame or microframe.
  821. *
  822. * It's not needed for peripheral side, which dedicates endpoints;
  823. * though it _might_ use SOF irqs for other purposes.
  824. *
  825. * And it's not currently needed for host side, which also dedicates
  826. * endpoints, relies on TX/RX interval registers, and isn't claimed
  827. * to support ISO transfers yet.
  828. */
  829. if (int_usb & MUSB_INTR_SOF) {
  830. void __iomem *mbase = musb->mregs;
  831. struct musb_hw_ep *ep;
  832. u8 epnum;
  833. u16 frame;
  834. dev_dbg(musb->controller, "START_OF_FRAME\n");
  835. handled = IRQ_HANDLED;
  836. /* start any periodic Tx transfers waiting for current frame */
  837. frame = musb_readw(mbase, MUSB_FRAME);
  838. ep = musb->endpoints;
  839. for (epnum = 1; (epnum < musb->nr_endpoints)
  840. && (musb->epmask >= (1 << epnum));
  841. epnum++, ep++) {
  842. /*
  843. * FIXME handle framecounter wraps (12 bits)
  844. * eliminate duplicated StartUrb logic
  845. */
  846. if (ep->dwWaitFrame >= frame) {
  847. ep->dwWaitFrame = 0;
  848. pr_debug("SOF --> periodic TX%s on %d\n",
  849. ep->tx_channel ? " DMA" : "",
  850. epnum);
  851. if (!ep->tx_channel)
  852. musb_h_tx_start(musb, epnum);
  853. else
  854. cppi_hostdma_start(musb, epnum);
  855. }
  856. } /* end of for loop */
  857. }
  858. #endif
  859. schedule_delayed_work(&musb->irq_work, 0);
  860. return handled;
  861. }
  862. /*-------------------------------------------------------------------------*/
/*
 * Mask all MUSB interrupt sources and drain anything already latched,
 * so no further controller interrupts fire after this returns.
 * Reversed by musb_enable_interrupts().
 */
static void musb_disable_interrupts(struct musb *musb)
{
	void __iomem *mbase = musb->mregs;
	u16 temp;

	/* disable interrupts; keep the cached enable masks in sync */
	musb_writeb(mbase, MUSB_INTRUSBE, 0);
	musb->intrtxe = 0;
	musb_writew(mbase, MUSB_INTRTXE, 0);
	musb->intrrxe = 0;
	musb_writew(mbase, MUSB_INTRRXE, 0);

	/* flush pending interrupts: the read itself performs the flush
	 * (these status registers clear on read), so 'temp' is
	 * intentionally never used.
	 */
	temp = musb_readb(mbase, MUSB_INTRUSB);
	temp = musb_readw(mbase, MUSB_INTRTX);
	temp = musb_readw(mbase, MUSB_INTRRX);
}
/*
 * Enable the TX/RX endpoint and USB-level interrupts for every endpoint
 * currently present in musb->epmask. Counterpart of
 * musb_disable_interrupts().
 */
static void musb_enable_interrupts(struct musb *musb)
{
	void __iomem *regs = musb->mregs;

	/* Set INT enable registers, enable interrupts */
	musb->intrtxe = musb->epmask;
	musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
	/* ep0 has no RX half, so bit 0 stays masked in INTRRXE */
	musb->intrrxe = musb->epmask & 0xfffe;
	musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
	/* NOTE(review): 0xf7 looks like "all USB irqs except SOF" (bit 3,
	 * matching the disabled SOF code elsewhere in this file) — confirm
	 * against the MUSB_INTR_* bit definitions.
	 */
	musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
}
/*
 * Program the HDRC to start (enable interrupts, dma, etc.).
 *
 * Configures link speed, decides whether the controller starts out
 * "active", and programs DEVCTL accordingly. The DEVCTL write is done
 * last, after the platform glue is enabled.
 */
void musb_start(struct musb *musb)
{
	void __iomem *regs = musb->mregs;
	u8 devctl = musb_readb(regs, MUSB_DEVCTL);
	u8 power;

	musb_dbg(musb, "<== devctl %02x", devctl);

	musb_enable_interrupts(musb);
	/* make sure no leftover test mode is active */
	musb_writeb(regs, MUSB_TESTMODE, 0);

	power = MUSB_POWER_ISOUPDATE;
	/*
	 * treating UNKNOWN as unspecified maximum speed, in which case
	 * we will default to high-speed.
	 */
	if (musb->config->maximum_speed == USB_SPEED_HIGH ||
			musb->config->maximum_speed == USB_SPEED_UNKNOWN)
		power |= MUSB_POWER_HSENAB;
	musb_writeb(regs, MUSB_POWER, power);

	musb->is_active = 0;
	devctl = musb_readb(regs, MUSB_DEVCTL);
	devctl &= ~MUSB_DEVCTL_SESSION;

	/* session started after:
	 * (a) ID-grounded irq, host mode;
	 * (b) vbus present/connect IRQ, peripheral mode;
	 * (c) peripheral initiates, using SRP
	 */
	if (musb->port_mode != MUSB_PORT_MODE_HOST &&
			musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON &&
			(devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
		/* VBUS field reads all-ones: VBus already valid, so we are
		 * active immediately without starting a session ourselves
		 */
		musb->is_active = 1;
	} else {
		devctl |= MUSB_DEVCTL_SESSION;
	}

	musb_platform_enable(musb);
	musb_writeb(regs, MUSB_DEVCTL, devctl);
}
/*
 * Make the HDRC stop (disable interrupts, etc.);
 * reversible by musb_start
 * called on gadget driver unregister
 * with controller locked, irqs blocked
 * acts as a NOP unless some role activated the hardware
 */
void musb_stop(struct musb *musb)
{
	/* stop IRQs, timers, ... */
	musb_platform_disable(musb);
	musb_disable_interrupts(musb);
	/* clearing DEVCTL drops any active session */
	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);

	/* FIXME
	 *  - mark host and/or peripheral drivers unusable/inactive
	 *  - disable DMA (and enable it in HdrcStart)
	 *  - make sure we can musb_start() after musb_stop(); with
	 *    OTG mode, gadget driver module rmmod/modprobe cycles that
	 *  - ...
	 */
	musb_platform_try_idle(musb, 0);
}
  948. /*-------------------------------------------------------------------------*/
/*
 * The silicon either has hard-wired endpoint configurations, or else
 * "dynamic fifo" sizing. The driver has support for both, though at this
 * writing only the dynamic sizing is very well tested. Since we switched
 * away from compile-time hardware parameters, we can no longer rely on
 * dead code elimination to leave only the relevant one in the object file.
 *
 * We don't currently use dynamic fifo setup capability to do anything
 * more than selecting one of a bunch of predefined configurations.
 */
static ushort fifo_mode;

/* "modprobe ... fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0);
MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");

/*
 * tables defining fifo_mode values. define more if you like.
 * for host side, make sure both halves of ep1 are set up.
 */

/* mode 0 - fits in 2KB */
static struct musb_fifo_cfg mode_0_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};

/* mode 1 - fits in 4KB */
static struct musb_fifo_cfg mode_1_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};

/* mode 2 - fits in 4KB */
static struct musb_fifo_cfg mode_2_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
/* NOTE(review): 960 is not a power of two; fifo_setup() rounds sizes
 * with ffs(), which truncates this — confirm intended.
 */
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 960, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 1024, },
};

/* mode 3 - fits in 4KB */
static struct musb_fifo_cfg mode_3_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};

/* mode 4 - fits in 16KB */
static struct musb_fifo_cfg mode_4_cfg[] = {
{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 256, },
{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 64, },
{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 256, },
{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 64, },
{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 256, },
{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 64, },
{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};

/* mode 5 - fits in 8KB */
static struct musb_fifo_cfg mode_5_cfg[] = {
{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};
/*
 * configure a fifo; for non-shared endpoints, this may be called
 * once for a tx fifo and once for an rx fifo.
 *
 * returns negative errno or offset for next fifo.
 */
static int
fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
		const struct musb_fifo_cfg *cfg, u16 offset)
{
	void __iomem *mbase = musb->mregs;
	int size = 0;
	u16 maxpacket = cfg->maxpacket;
	u16 c_off = offset >> 3;	/* FIFOADD presumably counts in 8-byte units — confirm */
	u8 c_size;

	/* expect hw_ep has already been zero-initialized */

	/*
	 * Encode the FIFO size as a power of two with a floor of 8 bytes.
	 * NOTE(review): ffs() returns the LOWEST set bit, so a maxpacket
	 * that is not a power of two is truncated to that bit (e.g. 960
	 * becomes 64) — confirm the cfg tables intend this.
	 */
	size = ffs(max(maxpacket, (u16) 8)) - 1;
	maxpacket = 1 << size;

	c_size = size - 3;	/* register encoding: log2(bytes) - 3 */
	if (cfg->mode == BUF_DOUBLE) {
		/* double buffering needs twice the FIFO RAM;
		 * total RAM is (1 << (ram_bits + 2)) bytes
		 */
		if ((offset + (maxpacket << 1)) >
				(1 << (musb->config->ram_bits + 2)))
			return -EMSGSIZE;
		c_size |= MUSB_FIFOSZ_DPB;
	} else {
		if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
			return -EMSGSIZE;
	}

	/* configure the FIFO */
	musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);

	/* EP0 reserved endpoint for control, bidirectional;
	 * EP1 reserved for bulk, two unidirectional halves.
	 */
	if (hw_ep->epnum == 1)
		musb->bulk_ep = hw_ep;
	/* REVISIT error check:  be sure ep0 can both rx and tx ... */
	switch (cfg->style) {
	case FIFO_TX:
		musb_write_txfifosz(mbase, c_size);
		musb_write_txfifoadd(mbase, c_off);
		hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_tx = maxpacket;
		break;
	case FIFO_RX:
		musb_write_rxfifosz(mbase, c_size);
		musb_write_rxfifoadd(mbase, c_off);
		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_rx = maxpacket;
		break;
	case FIFO_RXTX:
		/* shared FIFO: both halves get the same size and offset */
		musb_write_txfifosz(mbase, c_size);
		musb_write_txfifoadd(mbase, c_off);
		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_rx = maxpacket;

		musb_write_rxfifosz(mbase, c_size);
		musb_write_rxfifoadd(mbase, c_off);
		hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
		hw_ep->max_packet_sz_tx = maxpacket;

		hw_ep->is_shared_fifo = true;
		break;
	}

	/* NOTE rx and tx endpoint irqs aren't managed separately,
	 * which happens to be ok
	 */
	musb->epmask |= (1 << hw_ep->epnum);

	/* a double-buffered FIFO consumed twice the packet size */
	return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
}
/* ep0: bidirectional control endpoint, shared 64-byte FIFO */
static struct musb_fifo_cfg ep0_cfg = {
	.style = FIFO_RXTX, .maxpacket = 64,
};
  1131. static int ep_config_from_table(struct musb *musb)
  1132. {
  1133. const struct musb_fifo_cfg *cfg;
  1134. unsigned i, n;
  1135. int offset;
  1136. struct musb_hw_ep *hw_ep = musb->endpoints;
  1137. if (musb->config->fifo_cfg) {
  1138. cfg = musb->config->fifo_cfg;
  1139. n = musb->config->fifo_cfg_size;
  1140. goto done;
  1141. }
  1142. switch (fifo_mode) {
  1143. default:
  1144. fifo_mode = 0;
  1145. /* FALLTHROUGH */
  1146. case 0:
  1147. cfg = mode_0_cfg;
  1148. n = ARRAY_SIZE(mode_0_cfg);
  1149. break;
  1150. case 1:
  1151. cfg = mode_1_cfg;
  1152. n = ARRAY_SIZE(mode_1_cfg);
  1153. break;
  1154. case 2:
  1155. cfg = mode_2_cfg;
  1156. n = ARRAY_SIZE(mode_2_cfg);
  1157. break;
  1158. case 3:
  1159. cfg = mode_3_cfg;
  1160. n = ARRAY_SIZE(mode_3_cfg);
  1161. break;
  1162. case 4:
  1163. cfg = mode_4_cfg;
  1164. n = ARRAY_SIZE(mode_4_cfg);
  1165. break;
  1166. case 5:
  1167. cfg = mode_5_cfg;
  1168. n = ARRAY_SIZE(mode_5_cfg);
  1169. break;
  1170. }
  1171. pr_debug("%s: setup fifo_mode %d\n", musb_driver_name, fifo_mode);
  1172. done:
  1173. offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
  1174. /* assert(offset > 0) */
  1175. /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would
  1176. * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
  1177. */
  1178. for (i = 0; i < n; i++) {
  1179. u8 epn = cfg->hw_ep_num;
  1180. if (epn >= musb->config->num_eps) {
  1181. pr_debug("%s: invalid ep %d\n",
  1182. musb_driver_name, epn);
  1183. return -EINVAL;
  1184. }
  1185. offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
  1186. if (offset < 0) {
  1187. pr_debug("%s: mem overrun, ep %d\n",
  1188. musb_driver_name, epn);
  1189. return offset;
  1190. }
  1191. epn++;
  1192. musb->nr_endpoints = max(epn, musb->nr_endpoints);
  1193. }
  1194. pr_debug("%s: %d/%d max ep, %d/%d memory\n",
  1195. musb_driver_name,
  1196. n + 1, musb->config->num_eps * 2 - 1,
  1197. offset, (1 << (musb->config->ram_bits + 2)));
  1198. if (!musb->bulk_ep) {
  1199. pr_debug("%s: missing bulk\n", musb_driver_name);
  1200. return -EINVAL;
  1201. }
  1202. return 0;
  1203. }
/*
 * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
 * @param musb the controller
 *
 * Reads the hard-wired per-endpoint FIFO sizes from silicon and picks
 * one endpoint (tx and rx both >= 512 bytes) as the dedicated bulk ep.
 * Returns 0 on success, -EINVAL if no bulk-capable endpoint exists.
 */
static int ep_config_from_hw(struct musb *musb)
{
	u8 epnum = 0;
	struct musb_hw_ep *hw_ep;
	void __iomem *mbase = musb->mregs;
	int ret = 0;

	musb_dbg(musb, "<== static silicon ep config");

	/* FIXME pick up ep0 maxpacket size */

	for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
		musb_ep_select(mbase, epnum);
		hw_ep = musb->endpoints + epnum;

		ret = musb_read_fifosize(musb, hw_ep, epnum);
		/* a read failure ends endpoint discovery; endpoints found
		 * so far remain usable (the error itself is not returned)
		 */
		if (ret < 0)
			break;

		/* FIXME set up hw_ep->{rx,tx}_double_buffered */

		/* pick an RX/TX endpoint for bulk */
		if (hw_ep->max_packet_sz_tx < 512
				|| hw_ep->max_packet_sz_rx < 512)
			continue;

		/* REVISIT:  this algorithm is lazy, we should at least
		 * try to pick a double buffered endpoint.
		 */
		if (musb->bulk_ep)
			continue;
		musb->bulk_ep = hw_ep;
	}

	if (!musb->bulk_ep) {
		pr_debug("%s: missing bulk\n", musb_driver_name);
		return -EINVAL;
	}

	return 0;
}
/* controller flavors passed to musb_core_init(): multipoint vs. single */
enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
 * configure endpoints, or take their config from silicon
 */
static int musb_core_init(u16 musb_type, struct musb *musb)
{
	u8 reg;
	char *type;
	char aInfo[90];		/* holds the worst-case option string built below */
	void __iomem *mbase = musb->mregs;
	int status = 0;
	int i;

	/* log core options (read using indexed model) */
	reg = musb_read_configdata(mbase);

	strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
	if (reg & MUSB_CONFIGDATA_DYNFIFO) {
		strcat(aInfo, ", dyn FIFOs");
		musb->dyn_fifo = true;
	}
	if (reg & MUSB_CONFIGDATA_MPRXE) {
		strcat(aInfo, ", bulk combine");
		musb->bulk_combine = true;
	}
	if (reg & MUSB_CONFIGDATA_MPTXE) {
		strcat(aInfo, ", bulk split");
		musb->bulk_split = true;
	}
	if (reg & MUSB_CONFIGDATA_HBRXE) {
		strcat(aInfo, ", HB-ISO Rx");
		musb->hb_iso_rx = true;
	}
	if (reg & MUSB_CONFIGDATA_HBTXE) {
		strcat(aInfo, ", HB-ISO Tx");
		musb->hb_iso_tx = true;
	}
	if (reg & MUSB_CONFIGDATA_SOFTCONE)
		strcat(aInfo, ", SoftConn");

	pr_debug("%s: ConfigData=0x%02x (%s)\n", musb_driver_name, reg, aInfo);

	if (MUSB_CONTROLLER_MHDRC == musb_type) {
		musb->is_multipoint = 1;
		type = "M";
	} else {
		musb->is_multipoint = 0;
		type = "";
#ifndef CONFIG_USB_OTG_BLACKLIST_HUB
		pr_err("%s: kernel must blacklist external hubs\n",
		       musb_driver_name);
#endif
	}

	/* log release info */
	musb->hwvers = musb_read_hwvers(mbase);
	pr_debug("%s: %sHDRC RTL version %d.%d%s\n",
		 musb_driver_name, type, MUSB_HWVERS_MAJOR(musb->hwvers),
		 MUSB_HWVERS_MINOR(musb->hwvers),
		 (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");

	/* configure ep0 */
	musb_configure_ep0(musb);

	/* discover endpoint configuration; ep0 is always present */
	musb->nr_endpoints = 1;
	musb->epmask = 1;

	if (musb->dyn_fifo)
		status = ep_config_from_table(musb);
	else
		status = ep_config_from_hw(musb);

	if (status < 0)
		return status;

	/* finish init, and print endpoint config */
	for (i = 0; i < musb->nr_endpoints; i++) {
		struct musb_hw_ep	*hw_ep = musb->endpoints + i;

		hw_ep->fifo = musb->io.fifo_offset(i) + mbase;
#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
		if (musb->io.quirks & MUSB_IN_TUSB) {
			/* TUSB6010 maps FIFOs through async/sync windows */
			hw_ep->fifo_async = musb->async + 0x400 +
				musb->io.fifo_offset(i);
			hw_ep->fifo_sync = musb->sync + 0x400 +
				musb->io.fifo_offset(i);
			hw_ep->fifo_sync_va =
				musb->sync_va + 0x400 + musb->io.fifo_offset(i);

			if (i == 0)
				hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
			else
				hw_ep->conf = mbase + 0x400 +
					(((i - 1) & 0xf) << 2);
		}
#endif

		hw_ep->regs = musb->io.ep_offset(i, 0) + mbase;
		hw_ep->rx_reinit = 1;
		hw_ep->tx_reinit = 1;

		if (hw_ep->max_packet_sz_tx) {
			musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
				musb_driver_name, i,
				hw_ep->is_shared_fifo ? "shared" : "tx",
				hw_ep->tx_double_buffered
					? "doublebuffer, " : "",
				hw_ep->max_packet_sz_tx);
		}
		if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
			musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
				musb_driver_name, i,
				"rx",
				hw_ep->rx_double_buffered
					? "doublebuffer, " : "",
				hw_ep->max_packet_sz_rx);
		}
		if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
			musb_dbg(musb, "hw_ep %d not configured", i);
	}

	return 0;
}
  1349. /*-------------------------------------------------------------------------*/
/*
 * handle all the irqs defined by the HDRC core. for now we expect:  other
 * irq sources (phy, dma, etc) will be handled first, musb->int_* values
 * will be assigned, and the irq will already have been acked.
 *
 * called in irq context with spinlock held, irqs blocked
 */
irqreturn_t musb_interrupt(struct musb *musb)
{
	irqreturn_t	retval = IRQ_NONE;
	unsigned long	status;
	unsigned long	epnum;
	u8		devctl;

	if (!musb->int_usb && !musb->int_tx && !musb->int_rx)
		return IRQ_NONE;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	trace_musb_isr(musb);

	/**
	 * According to Mentor Graphics' documentation, flowchart on page 98,
	 * IRQ should be handled as follows:
	 *
	 * . Resume IRQ
	 * . Session Request IRQ
	 * . VBUS Error IRQ
	 * . Suspend IRQ
	 * . Connect IRQ
	 * . Disconnect IRQ
	 * . Reset/Babble IRQ
	 * . SOF IRQ (we're not using this one)
	 * . Endpoint 0 IRQ
	 * . TX Endpoints
	 * . RX Endpoints
	 *
	 * We will be following that flowchart in order to avoid any problems
	 * that might arise with internal Finite State Machine.
	 */

	/* USB-level (non-endpoint) interrupts first, per the flowchart */
	if (musb->int_usb)
		retval |= musb_stage0_irq(musb, musb->int_usb, devctl);

	/* bit 0 of int_tx is the ep0 interrupt (control, both directions) */
	if (musb->int_tx & 1) {
		if (is_host_active(musb))
			retval |= musb_h_ep0_irq(musb);
		else
			retval |= musb_g_ep0_irq(musb);

		/* we have just handled endpoint 0 IRQ, clear it */
		musb->int_tx &= ~BIT(0);
	}

	status = musb->int_tx;

	for_each_set_bit(epnum, &status, 16) {
		retval = IRQ_HANDLED;
		if (is_host_active(musb))
			musb_host_tx(musb, epnum);
		else
			musb_g_tx(musb, epnum);
	}

	status = musb->int_rx;

	for_each_set_bit(epnum, &status, 16) {
		retval = IRQ_HANDLED;
		if (is_host_active(musb))
			musb_host_rx(musb, epnum);
		else
			musb_g_rx(musb, epnum);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(musb_interrupt);
#ifndef CONFIG_MUSB_PIO_ONLY
static bool use_dma = 1;

/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");

/*
 * musb_dma_completion - dispatch a DMA completion to the right handler
 * @musb: controller instance
 * @epnum: endpoint number, 0 for the control endpoint
 * @transmit: nonzero for a TX completion, zero for RX
 *
 * Routes the completion to the host- or gadget-side endpoint handler
 * depending on the current role.
 */
void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
{
	/* called with controller lock already held */

	if (!epnum) {
		/* CPPI handles endpoint 0 completions on its own */
		if (!is_cppi_enabled(musb)) {
			/* endpoint 0 */
			if (is_host_active(musb))
				musb_h_ep0_irq(musb);
			else
				musb_g_ep0_irq(musb);
		}
	} else {
		/* endpoints 1..15 */
		if (transmit) {
			if (is_host_active(musb))
				musb_host_tx(musb, epnum);
			else
				musb_g_tx(musb, epnum);
		} else {
			/* receive */
			if (is_host_active(musb))
				musb_host_rx(musb, epnum);
			else
				musb_g_rx(musb, epnum);
		}
	}
}
EXPORT_SYMBOL_GPL(musb_dma_completion);

#else
#define use_dma			0
#endif
  1451. static int (*musb_phy_callback)(enum musb_vbus_id_status status);
  1452. /*
  1453. * musb_mailbox - optional phy notifier function
  1454. * @status phy state change
  1455. *
  1456. * Optionally gets called from the USB PHY. Note that the USB PHY must be
  1457. * disabled at the point the phy_callback is registered or unregistered.
  1458. */
  1459. int musb_mailbox(enum musb_vbus_id_status status)
  1460. {
  1461. if (musb_phy_callback)
  1462. return musb_phy_callback(status);
  1463. return -ENODEV;
  1464. };
  1465. EXPORT_SYMBOL_GPL(musb_mailbox);
  1466. /*-------------------------------------------------------------------------*/
  1467. static ssize_t
  1468. mode_show(struct device *dev, struct device_attribute *attr, char *buf)
  1469. {
  1470. struct musb *musb = dev_to_musb(dev);
  1471. unsigned long flags;
  1472. int ret = -EINVAL;
  1473. spin_lock_irqsave(&musb->lock, flags);
  1474. ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->otg->state));
  1475. spin_unlock_irqrestore(&musb->lock, flags);
  1476. return ret;
  1477. }
  1478. static ssize_t
  1479. mode_store(struct device *dev, struct device_attribute *attr,
  1480. const char *buf, size_t n)
  1481. {
  1482. struct musb *musb = dev_to_musb(dev);
  1483. unsigned long flags;
  1484. int status;
  1485. spin_lock_irqsave(&musb->lock, flags);
  1486. if (sysfs_streq(buf, "host"))
  1487. status = musb_platform_set_mode(musb, MUSB_HOST);
  1488. else if (sysfs_streq(buf, "peripheral"))
  1489. status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
  1490. else if (sysfs_streq(buf, "otg"))
  1491. status = musb_platform_set_mode(musb, MUSB_OTG);
  1492. else
  1493. status = -EINVAL;
  1494. spin_unlock_irqrestore(&musb->lock, flags);
  1495. return (status == 0) ? n : status;
  1496. }
  1497. static DEVICE_ATTR_RW(mode);
/*
 * sysfs "vbus" write: set the T(a_wait_bcon) timeout in msec.
 * 0 means wait forever; non-zero values are raised to at least
 * OTG_TIME_A_WAIT_BCON by the max_t() below.
 */
static ssize_t
vbus_store(struct device *dev, struct device_attribute *attr,
	   const char *buf, size_t n)
{
	struct musb *musb = dev_to_musb(dev);
	unsigned long flags;
	unsigned long val;

	/* NOTE(review): kstrtoul() is the preferred kernel idiom here */
	if (sscanf(buf, "%lu", &val) < 1) {
		dev_err(dev, "Invalid VBUS timeout ms value\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&musb->lock, flags);
	/* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
	musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0 ;
	/* a new timeout while waiting for b-connect restarts the wait */
	if (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)
		musb->is_active = 0;
	musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
	spin_unlock_irqrestore(&musb->lock, flags);

	return n;
}
  1518. static ssize_t
  1519. vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
  1520. {
  1521. struct musb *musb = dev_to_musb(dev);
  1522. unsigned long flags;
  1523. unsigned long val;
  1524. int vbus;
  1525. u8 devctl;
  1526. spin_lock_irqsave(&musb->lock, flags);
  1527. val = musb->a_wait_bcon;
  1528. vbus = musb_platform_get_vbus_status(musb);
  1529. if (vbus < 0) {
  1530. /* Use default MUSB method by means of DEVCTL register */
  1531. devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
  1532. if ((devctl & MUSB_DEVCTL_VBUS)
  1533. == (3 << MUSB_DEVCTL_VBUS_SHIFT))
  1534. vbus = 1;
  1535. else
  1536. vbus = 0;
  1537. }
  1538. spin_unlock_irqrestore(&musb->lock, flags);
  1539. return sprintf(buf, "Vbus %s, timeout %lu msec\n",
  1540. vbus ? "on" : "off", val);
  1541. }
  1542. static DEVICE_ATTR_RW(vbus);
  1543. /* Gadget drivers can't know that a host is connected so they might want
  1544. * to start SRP, but users can. This allows userspace to trigger SRP.
  1545. */
  1546. static ssize_t srp_store(struct device *dev, struct device_attribute *attr,
  1547. const char *buf, size_t n)
  1548. {
  1549. struct musb *musb = dev_to_musb(dev);
  1550. unsigned short srp;
  1551. if (sscanf(buf, "%hu", &srp) != 1
  1552. || (srp != 1)) {
  1553. dev_err(dev, "SRP: Value must be 1\n");
  1554. return -EINVAL;
  1555. }
  1556. if (srp == 1)
  1557. musb_g_wakeup(musb);
  1558. return n;
  1559. }
  1560. static DEVICE_ATTR_WO(srp);
/* sysfs attributes registered in musb_init_controller() */
static struct attribute *musb_attributes[] = {
	&dev_attr_mode.attr,
	&dev_attr_vbus.attr,
	&dev_attr_srp.attr,
	NULL			/* sentinel */
};

static const struct attribute_group musb_attr_group = {
	.attrs = musb_attributes,
};
/* DEVCTL patterns (with FSDEV/LSDEV/HR masked off) that need polling */
#define MUSB_QUIRK_B_INVALID_VBUS_91	(MUSB_DEVCTL_BDEVICE | \
					 (2 << MUSB_DEVCTL_VBUS_SHIFT) | \
					 MUSB_DEVCTL_SESSION)
#define MUSB_QUIRK_A_DISCONNECT_19	((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
					 MUSB_DEVCTL_SESSION)

/*
 * Check the musb devctl session bit to determine if we want to
 * allow PM runtime for the device. In general, we want to keep things
 * active when the session bit is set except after host disconnect.
 *
 * Only called from musb_irq_work. If this ever needs to get called
 * elsewhere, proper locking must be implemented for musb->session.
 */
static void musb_pm_runtime_check_session(struct musb *musb)
{
	u8 devctl, s;
	int error;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	/* Handle session status quirks first */
	s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
		MUSB_DEVCTL_HR;
	switch (devctl & ~s) {
	case MUSB_QUIRK_B_INVALID_VBUS_91:
		if (musb->quirk_retries && !musb->flush_irq_work) {
			/* re-poll in a second, bounded by quirk_retries */
			musb_dbg(musb,
				 "Poll devctl on invalid vbus, assume no session");
			schedule_delayed_work(&musb->irq_work,
					      msecs_to_jiffies(1000));
			musb->quirk_retries--;
			return;
		}
		/* fall through */
	case MUSB_QUIRK_A_DISCONNECT_19:
		if (musb->quirk_retries && !musb->flush_irq_work) {
			musb_dbg(musb,
				 "Poll devctl on possible host mode disconnect");
			schedule_delayed_work(&musb->irq_work,
					      msecs_to_jiffies(1000));
			musb->quirk_retries--;
			return;
		}
		/* Retries exhausted: release our PM reference, once */
		if (!musb->session)
			break;
		musb_dbg(musb, "Allow PM on possible host mode disconnect");
		pm_runtime_mark_last_busy(musb->controller);
		pm_runtime_put_autosuspend(musb->controller);
		musb->session = false;
		return;
	default:
		break;
	}

	/* No need to do anything if session has not changed */
	s = devctl & MUSB_DEVCTL_SESSION;
	if (s == musb->session)
		return;

	/* Block PM or allow PM? */
	if (s) {
		musb_dbg(musb, "Block PM on active session: %02x", devctl);
		error = pm_runtime_get_sync(musb->controller);
		if (error < 0)
			dev_err(musb->controller, "Could not enable: %i\n",
				error);
		musb->quirk_retries = 3;
	} else {
		musb_dbg(musb, "Allow PM with no session: %02x", devctl);
		pm_runtime_mark_last_busy(musb->controller);
		pm_runtime_put_autosuspend(musb->controller);
	}

	musb->session = s;
}
/* Only used to provide driver mode change events */
static void musb_irq_work(struct work_struct *data)
{
	struct musb *musb = container_of(data, struct musb, irq_work.work);
	int error;

	/* keep the controller powered while we poke at it */
	error = pm_runtime_get_sync(musb->controller);
	if (error < 0) {
		dev_err(musb->controller, "Could not enable: %i\n", error);

		return;
	}

	musb_pm_runtime_check_session(musb);

	/* let userspace know via sysfs when the OTG state changed */
	if (musb->xceiv->otg->state != musb->xceiv_old_state) {
		musb->xceiv_old_state = musb->xceiv->otg->state;
		sysfs_notify(&musb->controller->kobj, NULL, "mode");
	}

	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);
}
/*
 * Recover the controller after a babble interrupt: let the glue layer
 * reset it, drop the session, tell usbcore the root port disconnected,
 * rebuild the (lost) endpoint FIFO configuration and restart.
 */
static void musb_recover_from_babble(struct musb *musb)
{
	int ret;
	u8 devctl;

	musb_disable_interrupts(musb);

	/*
	 * wait at least 320 cycles of 60MHz clock. That's 5.3us, we will give
	 * it some slack and wait for 10us.
	 */
	udelay(10);

	ret = musb_platform_recover(musb);
	if (ret) {
		/* glue layer could not recover; re-arm IRQs and give up */
		musb_enable_interrupts(musb);
		return;
	}

	/* drop session bit */
	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	devctl &= ~MUSB_DEVCTL_SESSION;
	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);

	/* tell usbcore about it */
	musb_root_disconnect(musb);

	/*
	 * When a babble condition occurs, the musb controller
	 * removes the session bit and the endpoint config is lost.
	 */
	if (musb->dyn_fifo)
		ret = ep_config_from_table(musb);
	else
		ret = ep_config_from_hw(musb);

	/* restart session */
	if (ret == 0)
		musb_start(musb);
}
  1691. /* --------------------------------------------------------------------------
  1692. * Init support
  1693. */
  1694. static struct musb *allocate_instance(struct device *dev,
  1695. const struct musb_hdrc_config *config, void __iomem *mbase)
  1696. {
  1697. struct musb *musb;
  1698. struct musb_hw_ep *ep;
  1699. int epnum;
  1700. int ret;
  1701. musb = devm_kzalloc(dev, sizeof(*musb), GFP_KERNEL);
  1702. if (!musb)
  1703. return NULL;
  1704. INIT_LIST_HEAD(&musb->control);
  1705. INIT_LIST_HEAD(&musb->in_bulk);
  1706. INIT_LIST_HEAD(&musb->out_bulk);
  1707. INIT_LIST_HEAD(&musb->pending_list);
  1708. musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
  1709. musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
  1710. musb->mregs = mbase;
  1711. musb->ctrl_base = mbase;
  1712. musb->nIrq = -ENODEV;
  1713. musb->config = config;
  1714. BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS);
  1715. for (epnum = 0, ep = musb->endpoints;
  1716. epnum < musb->config->num_eps;
  1717. epnum++, ep++) {
  1718. ep->musb = musb;
  1719. ep->epnum = epnum;
  1720. }
  1721. musb->controller = dev;
  1722. ret = musb_host_alloc(musb);
  1723. if (ret < 0)
  1724. goto err_free;
  1725. dev_set_drvdata(dev, musb);
  1726. return musb;
  1727. err_free:
  1728. return NULL;
  1729. }
static void musb_free(struct musb *musb)
{
	/* this has multiple entry modes. it handles fault cleanup after
	 * probe(), where things may be partially set up, as well as rmmod
	 * cleanup after everything's been de-activated.
	 */

#ifdef CONFIG_SYSFS
	sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
#endif

	/* nIrq stays -ENODEV until request_irq() succeeded, so this
	 * only tears down an IRQ that was actually set up */
	if (musb->nIrq >= 0) {
		if (musb->irq_wake)
			disable_irq_wake(musb->nIrq);
		free_irq(musb->nIrq, musb);
	}

	musb_host_free(musb);
}
/*
 * Work item deferred until the controller is runtime-resumed; queued on
 * musb->pending_list by musb_queue_resume_work() and drained by
 * musb_run_resume_work().
 */
struct musb_pending_work {
	int (*callback)(struct musb *musb, void *data);	/* run on resume */
	void *data;					/* passed to callback */
	struct list_head node;				/* on musb->pending_list */
};
#ifdef CONFIG_PM
/*
 * Called from musb_runtime_resume(), musb_resume(), and
 * musb_queue_resume_work(). Callers must take musb->lock.
 *
 * Runs and frees every queued musb_pending_work item; returns the last
 * callback error seen (0 when all succeeded).
 */
static int musb_run_resume_work(struct musb *musb)
{
	struct musb_pending_work *w, *_w;
	unsigned long flags;
	int error = 0;

	spin_lock_irqsave(&musb->list_lock, flags);
	list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
		if (w->callback) {
			error = w->callback(musb, w->data);
			if (error < 0) {
				/* log and keep draining the rest */
				dev_err(musb->controller,
					"resume callback %p failed: %i\n",
					w->callback, error);
			}
		}
		list_del(&w->node);
		devm_kfree(musb->controller, w);
	}
	spin_unlock_irqrestore(&musb->list_lock, flags);

	return error;
}
#endif
/*
 * Called to run work if device is active or else queue the work to happen
 * on resume. Caller must take musb->lock and must hold an RPM reference.
 *
 * Note that we cowardly refuse queuing work after musb PM runtime
 * resume is done calling musb_run_resume_work() and return -EINPROGRESS
 * instead.
 */
int musb_queue_resume_work(struct musb *musb,
			   int (*callback)(struct musb *musb, void *data),
			   void *data)
{
	struct musb_pending_work *w;
	unsigned long flags;
	int error;

	if (WARN_ON(!callback))
		return -EINVAL;

	/* device is up: run the callback right away */
	if (pm_runtime_active(musb->controller))
		return callback(musb, data);

	w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
	if (!w)
		return -ENOMEM;

	w->callback = callback;
	w->data = data;

	spin_lock_irqsave(&musb->list_lock, flags);
	if (musb->is_runtime_suspended) {
		list_add_tail(&w->node, &musb->pending_list);
		error = 0;
	} else {
		/* resume already in progress; too late to queue */
		dev_err(musb->controller, "could not add resume work %p\n",
			callback);
		devm_kfree(musb->controller, w);
		error = -EINPROGRESS;
	}
	spin_unlock_irqrestore(&musb->list_lock, flags);

	return error;
}
EXPORT_SYMBOL_GPL(musb_queue_resume_work);
  1816. static void musb_deassert_reset(struct work_struct *work)
  1817. {
  1818. struct musb *musb;
  1819. unsigned long flags;
  1820. musb = container_of(work, struct musb, deassert_reset_work.work);
  1821. spin_lock_irqsave(&musb->lock, flags);
  1822. if (musb->port1_status & USB_PORT_STAT_RESET)
  1823. musb_port_reset(musb, false);
  1824. spin_unlock_irqrestore(&musb->lock, flags);
  1825. }
/*
 * Perform generic per-controller initialization.
 *
 * @dev: the controller (already clocked, etc)
 * @nIrq: IRQ number
 * @ctrl: virtual address of controller registers,
 *   not yet corrected for platform-specific offsets
 *
 * Returns 0 on success or a negative errno; on failure everything that
 * was set up is unwound through the fail*/goto chain at the bottom.
 */
static int
musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
{
	int			status;
	struct musb		*musb;
	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);

	/* The driver might handle more features than the board; OK.
	 * Fail when the board needs a feature that's not enabled.
	 */
	if (!plat) {
		dev_err(dev, "no platform_data?\n");
		status = -ENODEV;
		goto fail0;
	}

	/* allocate */
	musb = allocate_instance(dev, plat->config, ctrl);
	if (!musb) {
		status = -ENOMEM;
		goto fail0;
	}

	spin_lock_init(&musb->lock);
	spin_lock_init(&musb->list_lock);
	musb->board_set_power = plat->set_power;
	musb->min_power = plat->min_power;
	musb->ops = plat->platform_ops;
	musb->port_mode = plat->mode;

	/*
	 * Initialize the default IO functions. At least omap2430 needs
	 * these early. We initialize the platform specific IO functions
	 * later on.
	 */
	musb_readb = musb_default_readb;
	musb_writeb = musb_default_writeb;
	musb_readw = musb_default_readw;
	musb_writew = musb_default_writew;
	musb_readl = musb_default_readl;
	musb_writel = musb_default_writel;

	/* The musb_platform_init() call:
	 *   - adjusts musb->mregs
	 *   - sets the musb->isr
	 *   - may initialize an integrated transceiver
	 *   - initializes musb->xceiv, usually by otg_get_phy()
	 *   - stops powering VBUS
	 *
	 * There are various transceiver configurations. Blackfin,
	 * DaVinci, TUSB60x0, and others integrate them. OMAP3 uses
	 * external/discrete ones in various flavors (twl4030 family,
	 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
	 */
	status = musb_platform_init(musb);
	if (status < 0)
		goto fail1;

	if (!musb->isr) {
		status = -ENODEV;
		goto fail2;
	}

	if (musb->ops->quirks)
		musb->io.quirks = musb->ops->quirks;

	/* Most devices use indexed offset or flat offset */
	if (musb->io.quirks & MUSB_INDEXED_EP) {
		musb->io.ep_offset = musb_indexed_ep_offset;
		musb->io.ep_select = musb_indexed_ep_select;
	} else {
		musb->io.ep_offset = musb_flat_ep_offset;
		musb->io.ep_select = musb_flat_ep_select;
	}

	if (musb->io.quirks & MUSB_G_NO_SKB_RESERVE)
		musb->g.quirk_avoids_skb_reserve = 1;

	/* At least tusb6010 has its own offsets */
	if (musb->ops->ep_offset)
		musb->io.ep_offset = musb->ops->ep_offset;
	if (musb->ops->ep_select)
		musb->io.ep_select = musb->ops->ep_select;

	if (musb->ops->fifo_mode)
		fifo_mode = musb->ops->fifo_mode;
	else
		fifo_mode = 4;

	if (musb->ops->fifo_offset)
		musb->io.fifo_offset = musb->ops->fifo_offset;
	else
		musb->io.fifo_offset = musb_default_fifo_offset;

	if (musb->ops->busctl_offset)
		musb->io.busctl_offset = musb->ops->busctl_offset;
	else
		musb->io.busctl_offset = musb_default_busctl_offset;

	/* platform-specific register accessors override the defaults */
	if (musb->ops->readb)
		musb_readb = musb->ops->readb;
	if (musb->ops->writeb)
		musb_writeb = musb->ops->writeb;
	if (musb->ops->readw)
		musb_readw = musb->ops->readw;
	if (musb->ops->writew)
		musb_writew = musb->ops->writew;
	if (musb->ops->readl)
		musb_readl = musb->ops->readl;
	if (musb->ops->writel)
		musb_writel = musb->ops->writel;

#ifndef CONFIG_MUSB_PIO_ONLY
	if (!musb->ops->dma_init || !musb->ops->dma_exit) {
		dev_err(dev, "DMA controller not set\n");
		status = -ENODEV;
		goto fail2;
	}
	musb_dma_controller_create = musb->ops->dma_init;
	musb_dma_controller_destroy = musb->ops->dma_exit;
#endif

	if (musb->ops->read_fifo)
		musb->io.read_fifo = musb->ops->read_fifo;
	else
		musb->io.read_fifo = musb_default_read_fifo;

	if (musb->ops->write_fifo)
		musb->io.write_fifo = musb->ops->write_fifo;
	else
		musb->io.write_fifo = musb_default_write_fifo;

	/* default ULPI access through our own registers */
	if (!musb->xceiv->io_ops) {
		musb->xceiv->io_dev = musb->controller;
		musb->xceiv->io_priv = musb->mregs;
		musb->xceiv->io_ops = &musb_ulpi_access;
	}

	if (musb->ops->phy_callback)
		musb_phy_callback = musb->ops->phy_callback;

	/*
	 * We need musb_read/write functions initialized for PM.
	 * Note that at least 2430 glue needs autosuspend delay
	 * somewhere above 300 ms for the hardware to idle properly
	 * after disconnecting the cable in host mode. Let's use
	 * 500 ms for some margin.
	 */
	pm_runtime_use_autosuspend(musb->controller);
	pm_runtime_set_autosuspend_delay(musb->controller, 500);
	pm_runtime_enable(musb->controller);
	pm_runtime_get_sync(musb->controller);

	status = usb_phy_init(musb->xceiv);
	if (status < 0)
		goto err_usb_phy_init;

	if (use_dma && dev->dma_mask) {
		musb->dma_controller =
			musb_dma_controller_create(musb, musb->mregs);
		if (IS_ERR(musb->dma_controller)) {
			status = PTR_ERR(musb->dma_controller);
			goto fail2_5;
		}
	}

	/* be sure interrupts are disabled before connecting ISR */
	musb_platform_disable(musb);
	musb_disable_interrupts(musb);
	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);

	/* Init IRQ workqueue before request_irq */
	INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
	INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
	INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);

	/* setup musb parts of the core (especially endpoints) */
	status = musb_core_init(plat->config->multipoint
			? MUSB_CONTROLLER_MHDRC
			: MUSB_CONTROLLER_HDRC, musb);
	if (status < 0)
		goto fail3;

	timer_setup(&musb->otg_timer, musb_otg_timer_func, 0);

	/* attach to the IRQ */
	if (request_irq(nIrq, musb->isr, IRQF_SHARED, dev_name(dev), musb)) {
		dev_err(dev, "request_irq %d failed!\n", nIrq);
		status = -ENODEV;
		goto fail3;
	}
	musb->nIrq = nIrq;
	/* FIXME this handles wakeup irqs wrong */
	if (enable_irq_wake(nIrq) == 0) {
		musb->irq_wake = 1;
		device_init_wakeup(dev, 1);
	} else {
		musb->irq_wake = 0;
	}

	/* program PHY to use external vBus if required */
	if (plat->extvbus) {
		u8 busctl = musb_read_ulpi_buscontrol(musb->mregs);
		busctl |= MUSB_ULPI_USE_EXTVBUS;
		musb_write_ulpi_buscontrol(musb->mregs, busctl);
	}

	/* initial role follows the PHY's default_a flag */
	if (musb->xceiv->otg->default_a) {
		MUSB_HST_MODE(musb);
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
	} else {
		MUSB_DEV_MODE(musb);
		musb->xceiv->otg->state = OTG_STATE_B_IDLE;
	}

	switch (musb->port_mode) {
	case MUSB_PORT_MODE_HOST:
		status = musb_host_setup(musb, plat->power);
		if (status < 0)
			goto fail3;
		status = musb_platform_set_mode(musb, MUSB_HOST);
		break;
	case MUSB_PORT_MODE_GADGET:
		status = musb_gadget_setup(musb);
		if (status < 0)
			goto fail3;
		status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
		break;
	case MUSB_PORT_MODE_DUAL_ROLE:
		status = musb_host_setup(musb, plat->power);
		if (status < 0)
			goto fail3;
		status = musb_gadget_setup(musb);
		if (status) {
			musb_host_cleanup(musb);
			goto fail3;
		}
		status = musb_platform_set_mode(musb, MUSB_OTG);
		break;
	default:
		dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
		break;
	}

	if (status < 0)
		goto fail3;

	status = musb_init_debugfs(musb);
	if (status < 0)
		goto fail4;

	status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
	if (status)
		goto fail5;

	musb->is_initialized = 1;
	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);

	return 0;

	/* unwind in reverse order of setup */
fail5:
	musb_exit_debugfs(musb);

fail4:
	musb_gadget_cleanup(musb);
	musb_host_cleanup(musb);

fail3:
	cancel_delayed_work_sync(&musb->irq_work);
	cancel_delayed_work_sync(&musb->finish_resume_work);
	cancel_delayed_work_sync(&musb->deassert_reset_work);
	if (musb->dma_controller)
		musb_dma_controller_destroy(musb->dma_controller);

fail2_5:
	usb_phy_shutdown(musb->xceiv);

err_usb_phy_init:
	pm_runtime_dont_use_autosuspend(musb->controller);
	pm_runtime_put_sync(musb->controller);
	pm_runtime_disable(musb->controller);

fail2:
	if (musb->irq_wake)
		device_init_wakeup(dev, 0);
	musb_platform_exit(musb);

fail1:
	if (status != -EPROBE_DEFER)
		dev_err(musb->controller,
			"%s failed with status %d\n", __func__, status);

	musb_free(musb);

fail0:

	return status;
}
  2088. /*-------------------------------------------------------------------------*/
  2089. /* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
  2090. * bridge to a platform device; this driver then suffices.
  2091. */
  2092. static int musb_probe(struct platform_device *pdev)
  2093. {
  2094. struct device *dev = &pdev->dev;
  2095. int irq = platform_get_irq_byname(pdev, "mc");
  2096. struct resource *iomem;
  2097. void __iomem *base;
  2098. if (irq <= 0)
  2099. return -ENODEV;
  2100. iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2101. base = devm_ioremap_resource(dev, iomem);
  2102. if (IS_ERR(base))
  2103. return PTR_ERR(base);
  2104. return musb_init_controller(dev, irq, base);
  2105. }
static int musb_remove(struct platform_device *pdev)
{
	struct device	*dev = &pdev->dev;
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;

	/* this gets called on rmmod.
	 *  - Host mode: host may still be active
	 *  - Peripheral mode: peripheral is deactivated (or never-activated)
	 *  - OTG mode: both roles are deactivated (or never-activated)
	 */
	musb_exit_debugfs(musb);

	/* stop all deferred work before touching the hardware */
	cancel_delayed_work_sync(&musb->irq_work);
	cancel_delayed_work_sync(&musb->finish_resume_work);
	cancel_delayed_work_sync(&musb->deassert_reset_work);
	pm_runtime_get_sync(musb->controller);
	musb_host_cleanup(musb);
	musb_gadget_cleanup(musb);

	musb_platform_disable(musb);
	spin_lock_irqsave(&musb->lock, flags);
	musb_disable_interrupts(musb);
	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
	spin_unlock_irqrestore(&musb->lock, flags);

	pm_runtime_dont_use_autosuspend(musb->controller);
	pm_runtime_put_sync(musb->controller);
	pm_runtime_disable(musb->controller);
	musb_platform_exit(musb);
	musb_phy_callback = NULL;

	if (musb->dma_controller)
		musb_dma_controller_destroy(musb->dma_controller);

	usb_phy_shutdown(musb->xceiv);
	musb_free(musb);
	device_init_wakeup(dev, 0);
	return 0;
}
  2140. #ifdef CONFIG_PM
  2141. static void musb_save_context(struct musb *musb)
  2142. {
  2143. int i;
  2144. void __iomem *musb_base = musb->mregs;
  2145. void __iomem *epio;
  2146. musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
  2147. musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
  2148. musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
  2149. musb->context.power = musb_readb(musb_base, MUSB_POWER);
  2150. musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
  2151. musb->context.index = musb_readb(musb_base, MUSB_INDEX);
  2152. musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
  2153. for (i = 0; i < musb->config->num_eps; ++i) {
  2154. struct musb_hw_ep *hw_ep;
  2155. hw_ep = &musb->endpoints[i];
  2156. if (!hw_ep)
  2157. continue;
  2158. epio = hw_ep->regs;
  2159. if (!epio)
  2160. continue;
  2161. musb_writeb(musb_base, MUSB_INDEX, i);
  2162. musb->context.index_regs[i].txmaxp =
  2163. musb_readw(epio, MUSB_TXMAXP);
  2164. musb->context.index_regs[i].txcsr =
  2165. musb_readw(epio, MUSB_TXCSR);
  2166. musb->context.index_regs[i].rxmaxp =
  2167. musb_readw(epio, MUSB_RXMAXP);
  2168. musb->context.index_regs[i].rxcsr =
  2169. musb_readw(epio, MUSB_RXCSR);
  2170. if (musb->dyn_fifo) {
  2171. musb->context.index_regs[i].txfifoadd =
  2172. musb_read_txfifoadd(musb_base);
  2173. musb->context.index_regs[i].rxfifoadd =
  2174. musb_read_rxfifoadd(musb_base);
  2175. musb->context.index_regs[i].txfifosz =
  2176. musb_read_txfifosz(musb_base);
  2177. musb->context.index_regs[i].rxfifosz =
  2178. musb_read_rxfifosz(musb_base);
  2179. }
  2180. musb->context.index_regs[i].txtype =
  2181. musb_readb(epio, MUSB_TXTYPE);
  2182. musb->context.index_regs[i].txinterval =
  2183. musb_readb(epio, MUSB_TXINTERVAL);
  2184. musb->context.index_regs[i].rxtype =
  2185. musb_readb(epio, MUSB_RXTYPE);
  2186. musb->context.index_regs[i].rxinterval =
  2187. musb_readb(epio, MUSB_RXINTERVAL);
  2188. musb->context.index_regs[i].txfunaddr =
  2189. musb_read_txfunaddr(musb, i);
  2190. musb->context.index_regs[i].txhubaddr =
  2191. musb_read_txhubaddr(musb, i);
  2192. musb->context.index_regs[i].txhubport =
  2193. musb_read_txhubport(musb, i);
  2194. musb->context.index_regs[i].rxfunaddr =
  2195. musb_read_rxfunaddr(musb, i);
  2196. musb->context.index_regs[i].rxhubaddr =
  2197. musb_read_rxhubaddr(musb, i);
  2198. musb->context.index_regs[i].rxhubport =
  2199. musb_read_rxhubport(musb, i);
  2200. }
  2201. }
  2202. static void musb_restore_context(struct musb *musb)
  2203. {
  2204. int i;
  2205. void __iomem *musb_base = musb->mregs;
  2206. void __iomem *epio;
  2207. u8 power;
  2208. musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
  2209. musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
  2210. musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);
  2211. /* Don't affect SUSPENDM/RESUME bits in POWER reg */
  2212. power = musb_readb(musb_base, MUSB_POWER);
  2213. power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
  2214. musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
  2215. power |= musb->context.power;
  2216. musb_writeb(musb_base, MUSB_POWER, power);
  2217. musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
  2218. musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
  2219. musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
  2220. if (musb->context.devctl & MUSB_DEVCTL_SESSION)
  2221. musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
  2222. for (i = 0; i < musb->config->num_eps; ++i) {
  2223. struct musb_hw_ep *hw_ep;
  2224. hw_ep = &musb->endpoints[i];
  2225. if (!hw_ep)
  2226. continue;
  2227. epio = hw_ep->regs;
  2228. if (!epio)
  2229. continue;
  2230. musb_writeb(musb_base, MUSB_INDEX, i);
  2231. musb_writew(epio, MUSB_TXMAXP,
  2232. musb->context.index_regs[i].txmaxp);
  2233. musb_writew(epio, MUSB_TXCSR,
  2234. musb->context.index_regs[i].txcsr);
  2235. musb_writew(epio, MUSB_RXMAXP,
  2236. musb->context.index_regs[i].rxmaxp);
  2237. musb_writew(epio, MUSB_RXCSR,
  2238. musb->context.index_regs[i].rxcsr);
  2239. if (musb->dyn_fifo) {
  2240. musb_write_txfifosz(musb_base,
  2241. musb->context.index_regs[i].txfifosz);
  2242. musb_write_rxfifosz(musb_base,
  2243. musb->context.index_regs[i].rxfifosz);
  2244. musb_write_txfifoadd(musb_base,
  2245. musb->context.index_regs[i].txfifoadd);
  2246. musb_write_rxfifoadd(musb_base,
  2247. musb->context.index_regs[i].rxfifoadd);
  2248. }
  2249. musb_writeb(epio, MUSB_TXTYPE,
  2250. musb->context.index_regs[i].txtype);
  2251. musb_writeb(epio, MUSB_TXINTERVAL,
  2252. musb->context.index_regs[i].txinterval);
  2253. musb_writeb(epio, MUSB_RXTYPE,
  2254. musb->context.index_regs[i].rxtype);
  2255. musb_writeb(epio, MUSB_RXINTERVAL,
  2256. musb->context.index_regs[i].rxinterval);
  2257. musb_write_txfunaddr(musb, i,
  2258. musb->context.index_regs[i].txfunaddr);
  2259. musb_write_txhubaddr(musb, i,
  2260. musb->context.index_regs[i].txhubaddr);
  2261. musb_write_txhubport(musb, i,
  2262. musb->context.index_regs[i].txhubport);
  2263. musb_write_rxfunaddr(musb, i,
  2264. musb->context.index_regs[i].rxfunaddr);
  2265. musb_write_rxhubaddr(musb, i,
  2266. musb->context.index_regs[i].rxhubaddr);
  2267. musb_write_rxhubport(musb, i,
  2268. musb->context.index_regs[i].rxhubport);
  2269. }
  2270. musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
  2271. }
static int musb_suspend(struct device *dev)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	musb_platform_disable(musb);
	musb_disable_interrupts(musb);

	/* drain irq_work; flush_irq_work keeps the quirk handler in
	 * musb_pm_runtime_check_session() from re-queueing it */
	musb->flush_irq_work = true;
	while (flush_delayed_work(&musb->irq_work))
		;
	musb->flush_irq_work = false;

	if (!(musb->io.quirks & MUSB_PRESERVE_SESSION))
		musb_writeb(musb->mregs, MUSB_DEVCTL, 0);

	/* all deferred resume work should have run by now */
	WARN_ON(!list_empty(&musb->pending_list));

	spin_lock_irqsave(&musb->lock, flags);

	if (is_peripheral_active(musb)) {
		/* FIXME force disconnect unless we know USB will wake
		 * the system up quickly enough to respond ...
		 */
	} else if (is_host_active(musb)) {
		/* we know all the children are suspended; sometimes
		 * they will even be wakeup-enabled.
		 */
	}

	musb_save_context(musb);

	spin_unlock_irqrestore(&musb->lock, flags);
	return 0;
}
/*
 * musb_resume - system-sleep resume callback
 * @dev: the musb controller's platform device
 *
 * Restores the register context saved by musb_suspend(), restarts the
 * controller, and runs any work that was deferred while suspended.
 *
 * Always returns 0.
 */
static int musb_resume(struct device *dev)
{
	struct musb *musb = dev_to_musb(dev);
	unsigned long flags;
	int error;
	u8 devctl;
	u8 mask;

	/*
	 * For static cmos like DaVinci, register values were preserved
	 * unless for some reason the whole soc powered down or the USB
	 * module got reset through the PSC (vs just being disabled).
	 *
	 * For the DSPS glue layer though, a full register restore has to
	 * be done. As it shouldn't harm other platforms, we do it
	 * unconditionally.
	 */
	musb_restore_context(musb);

	/*
	 * If the role/speed bits in DEVCTL no longer match the saved
	 * context, the cached port status is stale — clear it.
	 */
	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
	if ((devctl & mask) != (musb->context.devctl & mask))
		musb->port1_status = 0;

	musb_start(musb);

	/* Deferred work runs under the controller lock */
	spin_lock_irqsave(&musb->lock, flags);
	error = musb_run_resume_work(musb);
	if (error)
		dev_err(musb->controller, "resume work failed with %i\n",
			error);
	spin_unlock_irqrestore(&musb->lock, flags);

	/* Balance the pm_runtime_get_sync() done in musb_suspend() */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}
  2337. static int musb_runtime_suspend(struct device *dev)
  2338. {
  2339. struct musb *musb = dev_to_musb(dev);
  2340. musb_save_context(musb);
  2341. musb->is_runtime_suspended = 1;
  2342. return 0;
  2343. }
/*
 * musb_runtime_resume - runtime-PM resume callback
 * @dev: the musb controller's platform device
 *
 * Restores the register context saved by musb_runtime_suspend() and
 * runs deferred resume work, unless the driver has not finished
 * initializing yet.
 *
 * Always returns 0.
 */
static int musb_runtime_resume(struct device *dev)
{
	struct musb *musb = dev_to_musb(dev);
	unsigned long flags;
	int error;

	/*
	 * When pm_runtime_get_sync called for the first time in driver
	 * init, some of the structure is still not initialized which is
	 * used in restore function. But clock needs to be
	 * enabled before any register access, so
	 * pm_runtime_get_sync has to be called.
	 * Also context restore without save does not make
	 * any sense
	 */
	if (!musb->is_initialized)
		return 0;

	musb_restore_context(musb);

	spin_lock_irqsave(&musb->lock, flags);
	error = musb_run_resume_work(musb);
	if (error)
		dev_err(musb->controller, "resume work failed with %i\n",
			error);
	/* Cleared under the lock so it is consistent with the work just run */
	musb->is_runtime_suspended = 0;
	spin_unlock_irqrestore(&musb->lock, flags);

	return 0;
}
/*
 * System-sleep and runtime PM callbacks. Only compiled in when the
 * enclosing #ifdef (above this hunk — presumably CONFIG_PM) is set;
 * otherwise MUSB_DEV_PM_OPS is NULL and no PM ops are registered.
 */
static const struct dev_pm_ops musb_dev_pm_ops = {
	.suspend = musb_suspend,
	.resume = musb_resume,
	.runtime_suspend = musb_runtime_suspend,
	.runtime_resume = musb_runtime_resume,
};

#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
#else
#define MUSB_DEV_PM_OPS NULL
#endif
/* Platform driver glue binding the musb core to the platform bus */
static struct platform_driver musb_driver = {
	.driver = {
		.name = (char *)musb_driver_name,
		.bus = &platform_bus_type,
		.pm = MUSB_DEV_PM_OPS,	/* NULL when PM support is compiled out */
	},
	.probe = musb_probe,
	.remove = musb_remove,
};

/* Generates module init/exit that register/unregister the driver */
module_platform_driver(musb_driver);