musb_core.c

/*
 * MUSB OTG driver core code
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
/*
 * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
 *
 * This consists of a Host Controller Driver (HCD) and a peripheral
 * controller driver implementing the "Gadget" API; OTG support is
 * in the works.  These are normal Linux-USB controller drivers which
 * use IRQs and have no dedicated thread.
 *
 * This version of the driver has only been used with products from
 * Texas Instruments.  Those products integrate the Inventra logic
 * with other DMA, IRQ, and bus modules, as well as other logic that
 * needs to be reflected in this driver.
 *
 *
 * NOTE: the original Mentor code here was pretty much a collection
 * of mechanisms that don't seem to have been fully integrated/working
 * for *any* Linux kernel version.  This version aims at Linux 2.6.now.
 * Key open issues include:
 *
 *  - Lack of host-side transaction scheduling, for all transfer types.
 *    The hardware doesn't do it; instead, software must.
 *
 *    This is not an issue for OTG devices that don't support external
 *    hubs, but for more "normal" USB hosts it's a user issue that the
 *    "multipoint" support doesn't scale in the expected ways.  That
 *    includes the DaVinci EVM in a common non-OTG mode.
 *
 *      * Control and bulk use dedicated endpoints, and there's as
 *        yet no mechanism to either (a) reclaim the hardware when
 *        peripherals are NAKing, which gets complicated with bulk
 *        endpoints, or (b) use more than a single bulk endpoint in
 *        each direction.
 *
 *        RESULT:  one device may be perceived as blocking another one.
 *
 *      * Interrupt and isochronous will dynamically allocate endpoint
 *        hardware, but (a) there's no record keeping for bandwidth;
 *        (b) in the common case that few endpoints are available, there
 *        is no mechanism to reuse endpoints to talk to multiple devices.
 *
 *        RESULT:  at one extreme, bandwidth can be overcommitted in
 *        some hardware configurations, and no faults will be reported.
 *        At the other extreme, the bandwidth capabilities which do
 *        exist tend to be severely undercommitted.  You can't yet hook
 *        up both a keyboard and a mouse to an external USB hub.
 */

/*
 * This gets many kinds of configuration information:
 *	- Kconfig for everything user-configurable
 *	- platform_device for addressing, irq, and platform_data
 *	- platform_data is mostly for board-specific information
 *	  (plus recently, SOC or family details)
 *
 * Most of the conditional compilation will (someday) vanish.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/prefetch.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>

#include "musb_core.h"
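
/*
 * T(a_wait_bcon) handling: the effective host-side wait-for-connect
 * timeout is never shorter than the OTG spec minimum, whatever the
 * platform's a_wait_bcon tuning says.
 */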
#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)

#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"

#define MUSB_VERSION "6.0"

#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION

#define MUSB_DRIVER_NAME "musb-hdrc"
const char musb_driver_name[] = MUSB_DRIVER_NAME;

MODULE_DESCRIPTION(DRIVER_INFO);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);


/*-------------------------------------------------------------------------*/

static inline struct musb *dev_to_musb(struct device *dev)
{
	return dev_get_drvdata(dev);
}

/*-------------------------------------------------------------------------*/

#ifndef CONFIG_BLACKFIN
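/*
 * ULPI PHY register access goes through the core's ULPI "carkit"
 * registers: write the PHY register address (and, for writes, the data),
 * kick off the request, then busy-wait for the REG_CMPLT bit, bailing
 * out with -ETIMEDOUT after a bounded number of polls.
 */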
static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
{
	void __iomem *addr = phy->io_priv;
	int	i = 0;
	u8	r;
	u8	power;
	int	ret;

	pm_runtime_get_sync(phy->io_dev);

	/* Make sure the transceiver is not in low power mode */
	power = musb_readb(addr, MUSB_POWER);
	power &= ~MUSB_POWER_SUSPENDM;
	musb_writeb(addr, MUSB_POWER, power);

	/* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
	 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
	 */
	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
			MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);

	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
				& MUSB_ULPI_REG_CMPLT)) {
		i++;
		if (i == 10000) {
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
	r &= ~MUSB_ULPI_REG_CMPLT;
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);

	ret = musb_readb(addr, MUSB_ULPI_REG_DATA);

out:
	pm_runtime_put(phy->io_dev);

	return ret;
}

static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
{
	void __iomem *addr = phy->io_priv;
	int	i = 0;
	u8	r = 0;
	u8	power;
	int	ret = 0;

	pm_runtime_get_sync(phy->io_dev);

	/* Make sure the transceiver is not in low power mode */
	power = musb_readb(addr, MUSB_POWER);
	power &= ~MUSB_POWER_SUSPENDM;
	musb_writeb(addr, MUSB_POWER, power);

	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data);
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);

	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
				& MUSB_ULPI_REG_CMPLT)) {
		i++;
		if (i == 10000) {
			ret = -ETIMEDOUT;
			goto out;
		}
	}

	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
	r &= ~MUSB_ULPI_REG_CMPLT;
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);

out:
	pm_runtime_put(phy->io_dev);

	return ret;
}
#else
#define musb_ulpi_read		NULL
#define musb_ulpi_write		NULL
#endif

static struct usb_phy_io_ops musb_ulpi_access = {
	.read = musb_ulpi_read,
	.write = musb_ulpi_write,
};

/*-------------------------------------------------------------------------*/
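/* In the default register layout, the per-endpoint FIFOs are a window of
 * 32-bit registers starting at offset 0x20, one slot per hardware endpoint.
 */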
static u32 musb_default_fifo_offset(u8 epnum)
{
	return 0x20 + (epnum * 4);
}

/* "flat" mapping: each endpoint has its own i/o address */
static void musb_flat_ep_select(void __iomem *mbase, u8 epnum)
{
}

static u32 musb_flat_ep_offset(u8 epnum, u16 offset)
{
	return 0x100 + (0x10 * epnum) + offset;
}

/* "indexed" mapping: INDEX register controls register bank select */
static void musb_indexed_ep_select(void __iomem *mbase, u8 epnum)
{
	musb_writeb(mbase, MUSB_INDEX, epnum);
}

static u32 musb_indexed_ep_offset(u8 epnum, u16 offset)
{
	return 0x10 + offset;
}

static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
{
	return __raw_readb(addr + offset);
}

static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
{
	__raw_writeb(data, addr + offset);
}

static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
{
	return __raw_readw(addr + offset);
}

static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
{
	__raw_writew(data, addr + offset);
}

static u32 musb_default_readl(const void __iomem *addr, unsigned offset)
{
	return __raw_readl(addr + offset);
}

static void musb_default_writel(void __iomem *addr, unsigned offset, u32 data)
{
	__raw_writel(data, addr + offset);
}

/*
 * Load an endpoint's FIFO
 */
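/* The copy width is picked from the source buffer's alignment: 32-bit
 * accesses when it is 4-byte aligned, 16-bit when only 2-byte aligned,
 * and plain byte accesses otherwise; any tail bytes are flushed with
 * narrower accesses.  The read path below mirrors this.
 */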
static void musb_default_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
				    const u8 *src)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *fifo = hw_ep->fifo;

	if (unlikely(len == 0))
		return;

	prefetch((u8 *)src);

	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
			'T', hw_ep->epnum, fifo, len, src);

	/* we can't assume unaligned reads work */
	if (likely((0x01 & (unsigned long) src) == 0)) {
		u16	index = 0;

		/* best case is 32bit-aligned source address */
		if ((0x02 & (unsigned long) src) == 0) {
			if (len >= 4) {
				iowrite32_rep(fifo, src + index, len >> 2);
				index += len & ~0x03;
			}
			if (len & 0x02) {
				musb_writew(fifo, 0, *(u16 *)&src[index]);
				index += 2;
			}
		} else {
			if (len >= 2) {
				iowrite16_rep(fifo, src + index, len >> 1);
				index += len & ~0x01;
			}
		}
		if (len & 0x01)
			musb_writeb(fifo, 0, src[index]);
	} else {
		/* byte aligned */
		iowrite8_rep(fifo, src, len);
	}
}

/*
 * Unload an endpoint's FIFO
 */
static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *fifo = hw_ep->fifo;

	if (unlikely(len == 0))
		return;

	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
			'R', hw_ep->epnum, fifo, len, dst);

	/* we can't assume unaligned writes work */
	if (likely((0x01 & (unsigned long) dst) == 0)) {
		u16	index = 0;

		/* best case is 32bit-aligned destination address */
		if ((0x02 & (unsigned long) dst) == 0) {
			if (len >= 4) {
				ioread32_rep(fifo, dst, len >> 2);
				index = len & ~0x03;
			}
			if (len & 0x02) {
				*(u16 *)&dst[index] = musb_readw(fifo, 0);
				index += 2;
			}
		} else {
			if (len >= 2) {
				ioread16_rep(fifo, dst, len >> 1);
				index = len & ~0x01;
			}
		}
		if (len & 0x01)
			dst[index] = musb_readb(fifo, 0);
	} else {
		/* byte aligned */
		ioread8_rep(fifo, dst, len);
	}
}

/*
 * Old style IO functions
 */
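/* Global register accessor pointers; controller init code (outside this
 * excerpt) typically points them at the musb_default_*() helpers above or
 * at platform-specific routines before any register traffic happens.
 */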
u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
EXPORT_SYMBOL_GPL(musb_readb);

void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
EXPORT_SYMBOL_GPL(musb_writeb);

u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
EXPORT_SYMBOL_GPL(musb_readw);

void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
EXPORT_SYMBOL_GPL(musb_writew);

u32 (*musb_readl)(const void __iomem *addr, unsigned offset);
EXPORT_SYMBOL_GPL(musb_readl);

void (*musb_writel)(void __iomem *addr, unsigned offset, u32 data);
EXPORT_SYMBOL_GPL(musb_writel);

/*
 * New style IO functions
 */
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
	return hw_ep->musb->io.read_fifo(hw_ep, len, dst);
}

void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
{
	return hw_ep->musb->io.write_fifo(hw_ep, len, src);
}

/*-------------------------------------------------------------------------*/
/* for high speed test mode; see USB 2.0 spec 7.1.20 */
static const u8 musb_test_packet[53] = {
	/* implicit SYNC then DATA0 to start */

	/* JKJKJKJK x9 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* JJKKJJKK x8 */
	0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
	/* JJJJKKKK x8 */
	0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
	/* JJJJJJJKKKKKKK x8 */
	0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	/* JJJJJJJK x8 */
	0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
	/* JKKKKKKK x10, JK */
	0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e

	/* implicit CRC16 then EOP to end */
};

void musb_load_testpacket(struct musb *musb)
{
	void __iomem	*regs = musb->endpoints[0].regs;

	musb_ep_select(musb->mregs, 0);
	musb_write_fifo(musb->control_ep,
			sizeof(musb_test_packet), musb_test_packet);
	musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
}

/*-------------------------------------------------------------------------*/
/*
 * Handles OTG hnp timeouts, such as b_ase0_brst
 */
static void musb_otg_timer_func(unsigned long data)
{
	struct musb	*musb = (struct musb *)data;
	unsigned long	flags;

	spin_lock_irqsave(&musb->lock, flags);
	switch (musb->xceiv->otg->state) {
	case OTG_STATE_B_WAIT_ACON:
		dev_dbg(musb->controller, "HNP: b_wait_acon timeout; back to b_peripheral\n");
		musb_g_disconnect(musb);
		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		musb->is_active = 0;
		break;
	case OTG_STATE_A_SUSPEND:
	case OTG_STATE_A_WAIT_BCON:
		dev_dbg(musb->controller, "HNP: %s timeout\n",
			usb_otg_state_string(musb->xceiv->otg->state));
		musb_platform_set_vbus(musb, 0);
		musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
		break;
	default:
		dev_dbg(musb->controller, "HNP: Unhandled mode %s\n",
			usb_otg_state_string(musb->xceiv->otg->state));
	}
	spin_unlock_irqrestore(&musb->lock, flags);
}

/*
 * Stops the HNP transition. Caller must take care of locking.
 */
void musb_hnp_stop(struct musb *musb)
{
	struct usb_hcd	*hcd = musb->hcd;
	void __iomem	*mbase = musb->mregs;
	u8	reg;

	dev_dbg(musb->controller, "HNP: stop from %s\n",
			usb_otg_state_string(musb->xceiv->otg->state));

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_A_PERIPHERAL:
		musb_g_disconnect(musb);
		dev_dbg(musb->controller, "HNP: back to %s\n",
			usb_otg_state_string(musb->xceiv->otg->state));
		break;
	case OTG_STATE_B_HOST:
		dev_dbg(musb->controller, "HNP: Disabling HR\n");
		if (hcd)
			hcd->self.is_b_host = 0;
		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		MUSB_DEV_MODE(musb);
		reg = musb_readb(mbase, MUSB_POWER);
		reg |= MUSB_POWER_SUSPENDM;
		musb_writeb(mbase, MUSB_POWER, reg);
		/* REVISIT: Start SESSION_REQUEST here? */
		break;
	default:
		dev_dbg(musb->controller, "HNP: Stopping in unknown state %s\n",
			usb_otg_state_string(musb->xceiv->otg->state));
	}

	/*
	 * When returning to A state after HNP, avoid hub_port_rebounce(),
	 * which causes occasional OPT A "Did not receive reset after connect"
	 * errors.
	 */
	musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
}
static void musb_recover_from_babble(struct musb *musb);

/*
 * Interrupt Service Routine to record USB "global" interrupts.
 * Since these do not happen often and signify things of
 * paramount importance, it seems OK to check them individually;
 * the order of the tests is specified in the manual.
 *
 * @param musb instance pointer
 * @param int_usb register contents
 * @param devctl
 */
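/*
 * The checks below cover, in order: RESUME, SESSION_REQUEST, VBUS_ERROR,
 * SUSPEND, CONNECT, DISCONNECT, and finally the shared RESET/BABBLE
 * interrupt.
 */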
static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
				u8 devctl)
{
	irqreturn_t handled = IRQ_NONE;

	dev_dbg(musb->controller, "<== DevCtl=%02x, int_usb=0x%x\n", devctl,
		int_usb);

	/* in host mode, the peripheral may issue remote wakeup.
	 * in peripheral mode, the host may resume the link.
	 * spurious RESUME irqs happen too, paired with SUSPEND.
	 */
	if (int_usb & MUSB_INTR_RESUME) {
		handled = IRQ_HANDLED;
		dev_dbg(musb->controller, "RESUME (%s)\n",
				usb_otg_state_string(musb->xceiv->otg->state));

		if (devctl & MUSB_DEVCTL_HM) {
			switch (musb->xceiv->otg->state) {
			case OTG_STATE_A_SUSPEND:
				/* remote wakeup? later, GetPortStatus
				 * will stop RESUME signaling
				 */
				musb->port1_status |=
						(USB_PORT_STAT_C_SUSPEND << 16)
						| MUSB_PORT_STAT_RESUME;
				musb->rh_timer = jiffies
					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
				musb->need_finish_resume = 1;

				musb->xceiv->otg->state = OTG_STATE_A_HOST;
				musb->is_active = 1;
				musb_host_resume_root_hub(musb);
				break;
			case OTG_STATE_B_WAIT_ACON:
				musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
				musb->is_active = 1;
				MUSB_DEV_MODE(musb);
				break;
			default:
				WARNING("bogus %s RESUME (%s)\n",
					"host",
					usb_otg_state_string(musb->xceiv->otg->state));
			}
		} else {
			switch (musb->xceiv->otg->state) {
			case OTG_STATE_A_SUSPEND:
				/* possibly DISCONNECT is upcoming */
				musb->xceiv->otg->state = OTG_STATE_A_HOST;
				musb_host_resume_root_hub(musb);
				break;
			case OTG_STATE_B_WAIT_ACON:
			case OTG_STATE_B_PERIPHERAL:
				/* disconnect while suspended?  we may
				 * not get a disconnect irq...
				 */
				if ((devctl & MUSB_DEVCTL_VBUS)
						!= (3 << MUSB_DEVCTL_VBUS_SHIFT)
						) {
					musb->int_usb |= MUSB_INTR_DISCONNECT;
					musb->int_usb &= ~MUSB_INTR_SUSPEND;
					break;
				}
				musb_g_resume(musb);
				break;
			case OTG_STATE_B_IDLE:
				musb->int_usb &= ~MUSB_INTR_SUSPEND;
				break;
			default:
				WARNING("bogus %s RESUME (%s)\n",
					"peripheral",
					usb_otg_state_string(musb->xceiv->otg->state));
			}
		}
	}

	/* see manual for the order of the tests */
	if (int_usb & MUSB_INTR_SESSREQ) {
		void __iomem *mbase = musb->mregs;

		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
				&& (devctl & MUSB_DEVCTL_BDEVICE)) {
			dev_dbg(musb->controller, "SessReq while on B state\n");
			return IRQ_HANDLED;
		}

		dev_dbg(musb->controller, "SESSION_REQUEST (%s)\n",
			usb_otg_state_string(musb->xceiv->otg->state));

		/* IRQ arrives from ID pin sense or (later, if VBUS power
		 * is removed) SRP.  responses are time critical:
		 *  - turn on VBUS (with silicon-specific mechanism)
		 *  - go through A_WAIT_VRISE
		 *  - ... to A_WAIT_BCON.
		 * a_wait_vrise_tmout triggers VBUS_ERROR transitions
		 */
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
		musb->ep0_stage = MUSB_EP0_START;
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		musb_platform_set_vbus(musb, 1);

		handled = IRQ_HANDLED;
	}
	if (int_usb & MUSB_INTR_VBUSERROR) {
		int	ignore = 0;

		/* During connection as an A-Device, we may see short
		 * current spikes causing voltage drops, because of cable
		 * and peripheral capacitance combined with vbus draw.
		 * (So: less common with truly self-powered devices, where
		 * vbus doesn't act like a power supply.)
		 *
		 * Such spikes are short; usually less than ~500 usec, max
		 * of ~2 msec.  That is, they're not sustained overcurrent
		 * errors, though they're reported using VBUSERROR irqs.
		 *
		 * Workarounds:  (a) hardware: use self powered devices.
		 * (b) software:  ignore non-repeated VBUS errors.
		 *
		 * REVISIT:  do delays from lots of DEBUG_KERNEL checks
		 * make trouble here, keeping VBUS < 4.4V ?
		 */
		switch (musb->xceiv->otg->state) {
		case OTG_STATE_A_HOST:
			/* recovery is dicey once we've gotten past the
			 * initial stages of enumeration, but if VBUS
			 * stayed ok at the other end of the link, and
			 * another reset is due (at least for high speed,
			 * to redo the chirp etc), it might work OK...
			 */
		case OTG_STATE_A_WAIT_BCON:
		case OTG_STATE_A_WAIT_VRISE:
			if (musb->vbuserr_retry) {
				void __iomem *mbase = musb->mregs;

				musb->vbuserr_retry--;
				ignore = 1;
				devctl |= MUSB_DEVCTL_SESSION;
				musb_writeb(mbase, MUSB_DEVCTL, devctl);
			} else {
				musb->port1_status |=
					  USB_PORT_STAT_OVERCURRENT
					| (USB_PORT_STAT_C_OVERCURRENT << 16);
			}
			break;
		default:
			break;
		}

		dev_printk(ignore ? KERN_DEBUG : KERN_ERR, musb->controller,
				"VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
				usb_otg_state_string(musb->xceiv->otg->state),
				devctl,
				({ char *s;
				switch (devctl & MUSB_DEVCTL_VBUS) {
				case 0 << MUSB_DEVCTL_VBUS_SHIFT:
					s = "<SessEnd"; break;
				case 1 << MUSB_DEVCTL_VBUS_SHIFT:
					s = "<AValid"; break;
				case 2 << MUSB_DEVCTL_VBUS_SHIFT:
					s = "<VBusValid"; break;
				/* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
				default:
					s = "VALID"; break;
				} s; }),
				VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
				musb->port1_status);

		/* go through A_WAIT_VFALL then start a new session */
		if (!ignore)
			musb_platform_set_vbus(musb, 0);
		handled = IRQ_HANDLED;
	}
	if (int_usb & MUSB_INTR_SUSPEND) {
		dev_dbg(musb->controller, "SUSPEND (%s) devctl %02x\n",
			usb_otg_state_string(musb->xceiv->otg->state), devctl);
		handled = IRQ_HANDLED;

		switch (musb->xceiv->otg->state) {
		case OTG_STATE_A_PERIPHERAL:
			/* We also come here if the cable is removed, since
			 * this silicon doesn't report ID-no-longer-grounded.
			 *
			 * We depend on T(a_wait_bcon) to shut us down, and
			 * hope users don't do anything dicey during this
			 * undesired detour through A_WAIT_BCON.
			 */
			musb_hnp_stop(musb);
			musb_host_resume_root_hub(musb);
			musb_root_disconnect(musb);
			musb_platform_try_idle(musb, jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon
						? : OTG_TIME_A_WAIT_BCON));

			break;
		case OTG_STATE_B_IDLE:
			if (!musb->is_active)
				break;
		case OTG_STATE_B_PERIPHERAL:
			musb_g_suspend(musb);
			musb->is_active = musb->g.b_hnp_enable;
			if (musb->is_active) {
				musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
				dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n");
				mod_timer(&musb->otg_timer, jiffies
					+ msecs_to_jiffies(
							OTG_TIME_B_ASE0_BRST));
			}
			break;
		case OTG_STATE_A_WAIT_BCON:
			if (musb->a_wait_bcon != 0)
				musb_platform_try_idle(musb, jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon));
			break;
		case OTG_STATE_A_HOST:
			musb->xceiv->otg->state = OTG_STATE_A_SUSPEND;
			musb->is_active = musb->hcd->self.b_hnp_enable;
			break;
		case OTG_STATE_B_HOST:
			/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
			dev_dbg(musb->controller, "REVISIT: SUSPEND as B_HOST\n");
			break;
		default:
			/* "should not happen" */
			musb->is_active = 0;
			break;
		}
	}
	if (int_usb & MUSB_INTR_CONNECT) {
		struct usb_hcd *hcd = musb->hcd;

		handled = IRQ_HANDLED;
		musb->is_active = 1;

		musb->ep0_stage = MUSB_EP0_START;

		musb->intrtxe = musb->epmask;
		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
		musb->intrrxe = musb->epmask & 0xfffe;
		musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
		musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
		musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
					|USB_PORT_STAT_HIGH_SPEED
					|USB_PORT_STAT_ENABLE
					);
		musb->port1_status |= USB_PORT_STAT_CONNECTION
					|(USB_PORT_STAT_C_CONNECTION << 16);

		/* high vs full speed is just a guess until after reset */
		if (devctl & MUSB_DEVCTL_LSDEV)
			musb->port1_status |= USB_PORT_STAT_LOW_SPEED;

		/* indicate new connection to OTG machine */
		switch (musb->xceiv->otg->state) {
		case OTG_STATE_B_PERIPHERAL:
			if (int_usb & MUSB_INTR_SUSPEND) {
				dev_dbg(musb->controller, "HNP: SUSPEND+CONNECT, now b_host\n");
				int_usb &= ~MUSB_INTR_SUSPEND;
				goto b_host;
			} else
				dev_dbg(musb->controller, "CONNECT as b_peripheral???\n");
			break;
		case OTG_STATE_B_WAIT_ACON:
			dev_dbg(musb->controller, "HNP: CONNECT, now b_host\n");
b_host:
			musb->xceiv->otg->state = OTG_STATE_B_HOST;
			if (musb->hcd)
				musb->hcd->self.is_b_host = 1;
			del_timer(&musb->otg_timer);
			break;
		default:
			if ((devctl & MUSB_DEVCTL_VBUS)
					== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
				musb->xceiv->otg->state = OTG_STATE_A_HOST;
				if (hcd)
					hcd->self.is_b_host = 0;
			}
			break;
		}

		musb_host_poke_root_hub(musb);

		dev_dbg(musb->controller, "CONNECT (%s) devctl %02x\n",
				usb_otg_state_string(musb->xceiv->otg->state), devctl);
	}
	if (int_usb & MUSB_INTR_DISCONNECT) {
		dev_dbg(musb->controller, "DISCONNECT (%s) as %s, devctl %02x\n",
				usb_otg_state_string(musb->xceiv->otg->state),
				MUSB_MODE(musb), devctl);
		handled = IRQ_HANDLED;

		switch (musb->xceiv->otg->state) {
		case OTG_STATE_A_HOST:
		case OTG_STATE_A_SUSPEND:
			musb_host_resume_root_hub(musb);
			musb_root_disconnect(musb);
			if (musb->a_wait_bcon != 0)
				musb_platform_try_idle(musb, jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon));
			break;
		case OTG_STATE_B_HOST:
			/* REVISIT this behaves for "real disconnect"
			 * cases; make sure the other transitions from
			 * B_HOST act right too.  The B_HOST code in
			 * hnp_stop() is currently not used...
			 */
			musb_root_disconnect(musb);
			if (musb->hcd)
				musb->hcd->self.is_b_host = 0;
			musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
			MUSB_DEV_MODE(musb);
			musb_g_disconnect(musb);
			break;
		case OTG_STATE_A_PERIPHERAL:
			musb_hnp_stop(musb);
			musb_root_disconnect(musb);
			/* FALLTHROUGH */
		case OTG_STATE_B_WAIT_ACON:
			/* FALLTHROUGH */
		case OTG_STATE_B_PERIPHERAL:
		case OTG_STATE_B_IDLE:
			musb_g_disconnect(musb);
			break;
		default:
			WARNING("unhandled DISCONNECT transition (%s)\n",
				usb_otg_state_string(musb->xceiv->otg->state));
			break;
		}
	}
	/* Mentor saves a bit: bus reset and babble share the same irq.
	 * only host sees babble; only peripheral sees bus reset.
	 */
	if (int_usb & MUSB_INTR_RESET) {
		handled = IRQ_HANDLED;
		if (devctl & MUSB_DEVCTL_HM) {
			/*
			 * When BABBLE happens what we can do depends on which
			 * platform MUSB is running on, because some platforms
			 * implemented proprietary means for 'recovering' from
			 * Babble conditions. One such platform is AM335x. In
			 * most cases, however, the only thing we can do is
			 * drop the session.
			 */
			dev_err(musb->controller, "Babble\n");

			if (is_host_active(musb))
				musb_recover_from_babble(musb);
		} else {
			dev_dbg(musb->controller, "BUS RESET as %s\n",
				usb_otg_state_string(musb->xceiv->otg->state));
			switch (musb->xceiv->otg->state) {
			case OTG_STATE_A_SUSPEND:
				musb_g_reset(musb);
				/* FALLTHROUGH */
			case OTG_STATE_A_WAIT_BCON:	/* OPT TD.4.7-900ms */
				/* never use invalid T(a_wait_bcon) */
				dev_dbg(musb->controller, "HNP: in %s, %d msec timeout\n",
					usb_otg_state_string(musb->xceiv->otg->state),
					TA_WAIT_BCON(musb));
				mod_timer(&musb->otg_timer, jiffies
					+ msecs_to_jiffies(TA_WAIT_BCON(musb)));
				break;
			case OTG_STATE_A_PERIPHERAL:
				del_timer(&musb->otg_timer);
				musb_g_reset(musb);
				break;
			case OTG_STATE_B_WAIT_ACON:
				dev_dbg(musb->controller, "HNP: RESET (%s), to b_peripheral\n",
					usb_otg_state_string(musb->xceiv->otg->state));
				musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
				musb_g_reset(musb);
				break;
			case OTG_STATE_B_IDLE:
				musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
				/* FALLTHROUGH */
			case OTG_STATE_B_PERIPHERAL:
				musb_g_reset(musb);
				break;
			default:
				dev_dbg(musb->controller, "Unhandled BUS RESET as %s\n",
					usb_otg_state_string(musb->xceiv->otg->state));
			}
		}
	}
#if 0
/* REVISIT ... this would be for multiplexing periodic endpoints, or
 * supporting transfer phasing to prevent exceeding ISO bandwidth
 * limits of a given frame or microframe.
 *
 * It's not needed for peripheral side, which dedicates endpoints;
 * though it _might_ use SOF irqs for other purposes.
 *
 * And it's not currently needed for host side, which also dedicates
 * endpoints, relies on TX/RX interval registers, and isn't claimed
 * to support ISO transfers yet.
 */
	if (int_usb & MUSB_INTR_SOF) {
		void __iomem *mbase = musb->mregs;
		struct musb_hw_ep	*ep;
		u8 epnum;
		u16 frame;

		dev_dbg(musb->controller, "START_OF_FRAME\n");
		handled = IRQ_HANDLED;

		/* start any periodic Tx transfers waiting for current frame */
		frame = musb_readw(mbase, MUSB_FRAME);
		ep = musb->endpoints;
		for (epnum = 1; (epnum < musb->nr_endpoints)
				&& (musb->epmask >= (1 << epnum));
				epnum++, ep++) {
			/*
			 * FIXME handle framecounter wraps (12 bits)
			 * eliminate duplicated StartUrb logic
			 */
			if (ep->dwWaitFrame >= frame) {
				ep->dwWaitFrame = 0;
				pr_debug("SOF --> periodic TX%s on %d\n",
					ep->tx_channel ? " DMA" : "",
					epnum);
				if (!ep->tx_channel)
					musb_h_tx_start(musb, epnum);
				else
					cppi_hostdma_start(musb, epnum);
			}
		}		/* end of for loop */
	}
#endif

	schedule_work(&musb->irq_work);

	return handled;
}

/*-------------------------------------------------------------------------*/
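/* Mask all endpoint and core interrupt sources, then read the pending
 * interrupt registers once so stale events are discarded; undone by
 * musb_enable_interrupts().
 */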
static void musb_disable_interrupts(struct musb *musb)
{
	void __iomem	*mbase = musb->mregs;
	u16	temp;

	/* disable interrupts */
	musb_writeb(mbase, MUSB_INTRUSBE, 0);
	musb->intrtxe = 0;
	musb_writew(mbase, MUSB_INTRTXE, 0);
	musb->intrrxe = 0;
	musb_writew(mbase, MUSB_INTRRXE, 0);

	/* flush pending interrupts */
	temp = musb_readb(mbase, MUSB_INTRUSB);
	temp = musb_readw(mbase, MUSB_INTRTX);
	temp = musb_readw(mbase, MUSB_INTRRX);
}

static void musb_enable_interrupts(struct musb *musb)
{
	void __iomem	*regs = musb->mregs;

	/* Set INT enable registers, enable interrupts */
	musb->intrtxe = musb->epmask;
	musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
	musb->intrrxe = musb->epmask & 0xfffe;
	musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
	musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
}

static void musb_generic_disable(struct musb *musb)
{
	void __iomem	*mbase = musb->mregs;

	musb_disable_interrupts(musb);

	/* off */
	musb_writeb(mbase, MUSB_DEVCTL, 0);
}

/*
 * Program the HDRC to start (enable interrupts, dma, etc.).
 */
void musb_start(struct musb *musb)
{
	void __iomem	*regs = musb->mregs;
	u8		devctl = musb_readb(regs, MUSB_DEVCTL);

	dev_dbg(musb->controller, "<== devctl %02x\n", devctl);

	musb_enable_interrupts(musb);
	musb_writeb(regs, MUSB_TESTMODE, 0);

	/* put into basic highspeed mode and start session */
	musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
			| MUSB_POWER_HSENAB
			/* ENSUSPEND wedges tusb */
			/* | MUSB_POWER_ENSUSPEND */
			);

	musb->is_active = 0;
	devctl = musb_readb(regs, MUSB_DEVCTL);
	devctl &= ~MUSB_DEVCTL_SESSION;

	/* session started after:
	 * (a) ID-grounded irq, host mode;
	 * (b) vbus present/connect IRQ, peripheral mode;
	 * (c) peripheral initiates, using SRP
	 */
	if (musb->port_mode != MUSB_PORT_MODE_HOST &&
	    (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
		musb->is_active = 1;
	} else {
		devctl |= MUSB_DEVCTL_SESSION;
	}

	musb_platform_enable(musb);
	musb_writeb(regs, MUSB_DEVCTL, devctl);
}

/*
 * Make the HDRC stop (disable interrupts, etc.);
 * reversible by musb_start
 * called on gadget driver unregister
 * with controller locked, irqs blocked
 * acts as a NOP unless some role activated the hardware
 */
void musb_stop(struct musb *musb)
{
	/* stop IRQs, timers, ... */
	musb_platform_disable(musb);
	musb_generic_disable(musb);
	dev_dbg(musb->controller, "HDRC disabled\n");

	/* FIXME
	 *  - mark host and/or peripheral drivers unusable/inactive
	 *  - disable DMA (and enable it in HdrcStart)
	 *  - make sure we can musb_start() after musb_stop(); with
	 *    OTG mode, gadget driver module rmmod/modprobe cycles that
	 *  - ...
	 */
	musb_platform_try_idle(musb, 0);
}

static void musb_shutdown(struct platform_device *pdev)
{
	struct musb	*musb = dev_to_musb(&pdev->dev);
	unsigned long	flags;

	pm_runtime_get_sync(musb->controller);

	musb_host_cleanup(musb);
	musb_gadget_cleanup(musb);

	spin_lock_irqsave(&musb->lock, flags);
	musb_platform_disable(musb);
	musb_generic_disable(musb);
	spin_unlock_irqrestore(&musb->lock, flags);

	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
	musb_platform_exit(musb);

	pm_runtime_put(musb->controller);
	/* FIXME power down */
}

/*-------------------------------------------------------------------------*/

/*
 * The silicon either has hard-wired endpoint configurations, or else
 * "dynamic fifo" sizing.  The driver has support for both, though at this
 * writing only the dynamic sizing is very well tested.  Since we switched
 * away from compile-time hardware parameters, we can no longer rely on
 * dead code elimination to leave only the relevant one in the object file.
 *
 * We don't currently use dynamic fifo setup capability to do anything
 * more than selecting one of a bunch of predefined configurations.
 */
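/*
 * A glue layer can also skip fifo_mode entirely and hand the core a
 * board-specific table through struct musb_hdrc_config (fifo_cfg and
 * fifo_cfg_size), which ep_config_from_table() checks first.  A minimal
 * sketch (the board_* names are illustrative only):
 *
 *	static struct musb_fifo_cfg board_fifo_cfg[] = {
 *		{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
 *		{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
 *		{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
 *	};
 *
 *	static struct musb_hdrc_config board_config = {
 *		.fifo_cfg	= board_fifo_cfg,
 *		.fifo_cfg_size	= ARRAY_SIZE(board_fifo_cfg),
 *	};
 */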
static ushort fifo_mode;

/* "modprobe ... fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0);
MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");

/*
 * tables defining fifo_mode values.  define more if you like.
 * for host side, make sure both halves of ep1 are set up.
 */

/* mode 0 - fits in 2KB */
static struct musb_fifo_cfg mode_0_cfg[] = {
	{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
	{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
	{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};

/* mode 1 - fits in 4KB */
static struct musb_fifo_cfg mode_1_cfg[] = {
	{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
	{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
	{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
	{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
	{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};

/* mode 2 - fits in 4KB */
static struct musb_fifo_cfg mode_2_cfg[] = {
	{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
	{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};

/* mode 3 - fits in 4KB */
static struct musb_fifo_cfg mode_3_cfg[] = {
	{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
	{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
	{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
	{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};

/* mode 4 - fits in 16KB */
static struct musb_fifo_cfg mode_4_cfg[] = {
	{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 256, },
	{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 64, },
	{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 256, },
	{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 64, },
	{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 256, },
	{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 64, },
	{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
	{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
	{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};

/* mode 5 - fits in 8KB */
static struct musb_fifo_cfg mode_5_cfg[] = {
	{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
	{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
	{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 32, },
	{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 32, },
	{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 32, },
	{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 32, },
	{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 32, },
	{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 32, },
	{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 32, },
	{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 32, },
	{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 32, },
	{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 32, },
	{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 32, },
	{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 32, },
	{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 32, },
	{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 32, },
	{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
	{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
	{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};

/*
 * configure a fifo; for non-shared endpoints, this may be called
 * once for a tx fifo and once for an rx fifo.
 *
 * returns negative errno or offset for next fifo.
 */
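/* Worked example: a 512-byte single-buffered FIFO gives size = ffs(512) - 1
 * = 9, so c_size = 9 - 3 = 6 (the FIFOSZ register encodes log2(bytes) - 3),
 * and the FIFO start address is programmed in 8-byte units (offset >> 3).
 * Double buffering sets MUSB_FIFOSZ_DPB and reserves twice the space.
 */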
static int
fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
		const struct musb_fifo_cfg *cfg, u16 offset)
{
	void __iomem	*mbase = musb->mregs;
	int	size = 0;
	u16	maxpacket = cfg->maxpacket;
	u16	c_off = offset >> 3;
	u8	c_size;

	/* expect hw_ep has already been zero-initialized */

	size = ffs(max(maxpacket, (u16) 8)) - 1;
	maxpacket = 1 << size;

	c_size = size - 3;
	if (cfg->mode == BUF_DOUBLE) {
		if ((offset + (maxpacket << 1)) >
				(1 << (musb->config->ram_bits + 2)))
			return -EMSGSIZE;
		c_size |= MUSB_FIFOSZ_DPB;
	} else {
		if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
			return -EMSGSIZE;
	}

	/* configure the FIFO */
	musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);

	/* EP0 reserved endpoint for control, bidirectional;
	 * EP1 reserved for bulk, two unidirectional halves.
	 */
	if (hw_ep->epnum == 1)
		musb->bulk_ep = hw_ep;
	/* REVISIT error check:  be sure ep0 can both rx and tx ... */
	switch (cfg->style) {
	case FIFO_TX:
		musb_write_txfifosz(mbase, c_size);
		musb_write_txfifoadd(mbase, c_off);
		hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_tx = maxpacket;
		break;
	case FIFO_RX:
		musb_write_rxfifosz(mbase, c_size);
		musb_write_rxfifoadd(mbase, c_off);
		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_rx = maxpacket;
		break;
	case FIFO_RXTX:
		musb_write_txfifosz(mbase, c_size);
		musb_write_txfifoadd(mbase, c_off);
		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_rx = maxpacket;

		musb_write_rxfifosz(mbase, c_size);
		musb_write_rxfifoadd(mbase, c_off);
		hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
		hw_ep->max_packet_sz_tx = maxpacket;

		hw_ep->is_shared_fifo = true;
		break;
	}

	/* NOTE rx and tx endpoint irqs aren't managed separately,
	 * which happens to be ok
	 */
	musb->epmask |= (1 << hw_ep->epnum);

	return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
}

static struct musb_fifo_cfg ep0_cfg = {
	.style = FIFO_RXTX, .maxpacket = 64,
};
static int ep_config_from_table(struct musb *musb)
{
	const struct musb_fifo_cfg	*cfg;
	unsigned		i, n;
	int			offset;
	struct musb_hw_ep	*hw_ep = musb->endpoints;

	if (musb->config->fifo_cfg) {
		cfg = musb->config->fifo_cfg;
		n = musb->config->fifo_cfg_size;
		goto done;
	}

	switch (fifo_mode) {
	default:
		fifo_mode = 0;
		/* FALLTHROUGH */
	case 0:
		cfg = mode_0_cfg;
		n = ARRAY_SIZE(mode_0_cfg);
		break;
	case 1:
		cfg = mode_1_cfg;
		n = ARRAY_SIZE(mode_1_cfg);
		break;
	case 2:
		cfg = mode_2_cfg;
		n = ARRAY_SIZE(mode_2_cfg);
		break;
	case 3:
		cfg = mode_3_cfg;
		n = ARRAY_SIZE(mode_3_cfg);
		break;
	case 4:
		cfg = mode_4_cfg;
		n = ARRAY_SIZE(mode_4_cfg);
		break;
	case 5:
		cfg = mode_5_cfg;
		n = ARRAY_SIZE(mode_5_cfg);
		break;
	}

	printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
			musb_driver_name, fifo_mode);

done:
	offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
	/* assert(offset > 0) */

	/* NOTE:  for RTL versions >= 1.400 EPINFO and RAMINFO would
	 * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
	 */

	for (i = 0; i < n; i++) {
		u8	epn = cfg->hw_ep_num;

		if (epn >= musb->config->num_eps) {
			pr_debug("%s: invalid ep %d\n",
					musb_driver_name, epn);
			return -EINVAL;
		}
		offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
		if (offset < 0) {
			pr_debug("%s: mem overrun, ep %d\n",
					musb_driver_name, epn);
			return offset;
		}
		epn++;
		musb->nr_endpoints = max(epn, musb->nr_endpoints);
	}

	printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n",
			musb_driver_name,
			n + 1, musb->config->num_eps * 2 - 1,
			offset, (1 << (musb->config->ram_bits + 2)));

	if (!musb->bulk_ep) {
		pr_debug("%s: missing bulk\n", musb_driver_name);
		return -EINVAL;
	}

	return 0;
}
/*
 * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
 * @param musb the controller
 */
static int ep_config_from_hw(struct musb *musb)
{
	u8 epnum = 0;
	struct musb_hw_ep *hw_ep;
	void __iomem *mbase = musb->mregs;
	int ret = 0;

	dev_dbg(musb->controller, "<== static silicon ep config\n");

	/* FIXME pick up ep0 maxpacket size */

	for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
		musb_ep_select(mbase, epnum);
		hw_ep = musb->endpoints + epnum;

		ret = musb_read_fifosize(musb, hw_ep, epnum);
		if (ret < 0)
			break;

		/* FIXME set up hw_ep->{rx,tx}_double_buffered */

		/* pick an RX/TX endpoint for bulk */
		if (hw_ep->max_packet_sz_tx < 512
				|| hw_ep->max_packet_sz_rx < 512)
			continue;

		/* REVISIT: this algorithm is lazy, we should at least
		 * try to pick a double buffered endpoint.
		 */
		if (musb->bulk_ep)
			continue;
		musb->bulk_ep = hw_ep;
	}

	if (!musb->bulk_ep) {
		pr_debug("%s: missing bulk\n", musb_driver_name);
		return -EINVAL;
	}

	return 0;
}
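
/* Both configuration paths must end up with a bulk-capable musb->bulk_ep;
 * host-side scheduling can fall back to it (see the in_bulk/out_bulk lists
 * set up in allocate_instance()) when no dedicated endpoint is free.
 */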
enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };

/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
 * configure endpoints, or take their config from silicon
 */
static int musb_core_init(u16 musb_type, struct musb *musb)
{
	u8 reg;
	char *type;
	char aInfo[90], aRevision[32], aDate[12];
	void __iomem	*mbase = musb->mregs;
	int		status = 0;
	int		i;

	/* log core options (read using indexed model) */
	reg = musb_read_configdata(mbase);

	strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
	if (reg & MUSB_CONFIGDATA_DYNFIFO) {
		strcat(aInfo, ", dyn FIFOs");
		musb->dyn_fifo = true;
	}
	if (reg & MUSB_CONFIGDATA_MPRXE) {
		strcat(aInfo, ", bulk combine");
		musb->bulk_combine = true;
	}
	if (reg & MUSB_CONFIGDATA_MPTXE) {
		strcat(aInfo, ", bulk split");
		musb->bulk_split = true;
	}
	if (reg & MUSB_CONFIGDATA_HBRXE) {
		strcat(aInfo, ", HB-ISO Rx");
		musb->hb_iso_rx = true;
	}
	if (reg & MUSB_CONFIGDATA_HBTXE) {
		strcat(aInfo, ", HB-ISO Tx");
		musb->hb_iso_tx = true;
	}
	if (reg & MUSB_CONFIGDATA_SOFTCONE)
		strcat(aInfo, ", SoftConn");

	printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
			musb_driver_name, reg, aInfo);

	aDate[0] = 0;
	if (MUSB_CONTROLLER_MHDRC == musb_type) {
		musb->is_multipoint = 1;
		type = "M";
	} else {
		musb->is_multipoint = 0;
		type = "";
#ifndef CONFIG_USB_OTG_BLACKLIST_HUB
		printk(KERN_ERR
			"%s: kernel must blacklist external hubs\n",
			musb_driver_name);
#endif
	}

	/* log release info */
	musb->hwvers = musb_read_hwvers(mbase);
	snprintf(aRevision, 32, "%d.%d%s", MUSB_HWVERS_MAJOR(musb->hwvers),
		MUSB_HWVERS_MINOR(musb->hwvers),
		(musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
	printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n",
			musb_driver_name, type, aRevision, aDate);

	/* configure ep0 */
	musb_configure_ep0(musb);

	/* discover endpoint configuration */
	musb->nr_endpoints = 1;
	musb->epmask = 1;

	if (musb->dyn_fifo)
		status = ep_config_from_table(musb);
	else
		status = ep_config_from_hw(musb);

	if (status < 0)
		return status;

	/* finish init, and print endpoint config */
	for (i = 0; i < musb->nr_endpoints; i++) {
		struct musb_hw_ep	*hw_ep = musb->endpoints + i;

		hw_ep->fifo = musb->io.fifo_offset(i) + mbase;
#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
		if (musb->io.quirks & MUSB_IN_TUSB) {
			hw_ep->fifo_async = musb->async + 0x400 +
				musb->io.fifo_offset(i);
			hw_ep->fifo_sync = musb->sync + 0x400 +
				musb->io.fifo_offset(i);
			hw_ep->fifo_sync_va =
				musb->sync_va + 0x400 + musb->io.fifo_offset(i);

			if (i == 0)
				hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
			else
				hw_ep->conf = mbase + 0x400 +
					(((i - 1) & 0xf) << 2);
		}
#endif

		hw_ep->regs = musb->io.ep_offset(i, 0) + mbase;
		hw_ep->target_regs = musb_read_target_reg_base(i, mbase);
		hw_ep->rx_reinit = 1;
		hw_ep->tx_reinit = 1;

		if (hw_ep->max_packet_sz_tx) {
			dev_dbg(musb->controller,
				"%s: hw_ep %d%s, %smax %d\n",
				musb_driver_name, i,
				hw_ep->is_shared_fifo ? "shared" : "tx",
				hw_ep->tx_double_buffered
					? "doublebuffer, " : "",
				hw_ep->max_packet_sz_tx);
		}
		if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
			dev_dbg(musb->controller,
				"%s: hw_ep %d%s, %smax %d\n",
				musb_driver_name, i,
				"rx",
				hw_ep->rx_double_buffered
					? "doublebuffer, " : "",
				hw_ep->max_packet_sz_rx);
		}
		if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
			dev_dbg(musb->controller, "hw_ep %d not configured\n", i);
	}

	return 0;
}
/*-------------------------------------------------------------------------*/

/*
 * handle all the irqs defined by the HDRC core. for now we expect:  other
 * irq sources (phy, dma, etc) will be handled first, musb->int_* values
 * will be assigned, and the irq will already have been acked.
 *
 * called in irq context with spinlock held, irqs blocked
 */
irqreturn_t musb_interrupt(struct musb *musb)
{
	irqreturn_t	retval = IRQ_NONE;
	unsigned long	status;
	unsigned long	epnum;
	u8		devctl;

	if (!musb->int_usb && !musb->int_tx && !musb->int_rx)
		return IRQ_NONE;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	dev_dbg(musb->controller, "** IRQ %s usb%04x tx%04x rx%04x\n",
		is_host_active(musb) ? "host" : "peripheral",
		musb->int_usb, musb->int_tx, musb->int_rx);

	/**
	 * According to Mentor Graphics' documentation, flowchart on page 98,
	 * IRQ should be handled as follows:
	 *
	 * . Resume IRQ
	 * . Session Request IRQ
	 * . VBUS Error IRQ
	 * . Suspend IRQ
	 * . Connect IRQ
	 * . Disconnect IRQ
	 * . Reset/Babble IRQ
	 * . SOF IRQ (we're not using this one)
	 * . Endpoint 0 IRQ
	 * . TX Endpoints
	 * . RX Endpoints
	 *
	 * We will be following that flowchart in order to avoid any problems
	 * that might arise with internal Finite State Machine.
	 */

	if (musb->int_usb)
		retval |= musb_stage0_irq(musb, musb->int_usb, devctl);

	if (musb->int_tx & 1) {
		if (is_host_active(musb))
			retval |= musb_h_ep0_irq(musb);
		else
			retval |= musb_g_ep0_irq(musb);

		/* we have just handled endpoint 0 IRQ, clear it */
		musb->int_tx &= ~BIT(0);
	}

	status = musb->int_tx;

	for_each_set_bit(epnum, &status, 16) {
		retval = IRQ_HANDLED;
		if (is_host_active(musb))
			musb_host_tx(musb, epnum);
		else
			musb_g_tx(musb, epnum);
	}

	status = musb->int_rx;

	for_each_set_bit(epnum, &status, 16) {
		retval = IRQ_HANDLED;
		if (is_host_active(musb))
			musb_host_rx(musb, epnum);
		else
			musb_g_rx(musb, epnum);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(musb_interrupt);
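
/* When CONFIG_MUSB_PIO_ONLY is set, all transfers use PIO and use_dma
 * compiles away to 0; otherwise DMA can still be turned off at module
 * load time through the use_dma parameter described below.
 */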
#ifndef CONFIG_MUSB_PIO_ONLY
static bool use_dma = 1;

/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");

void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
{
	/* called with controller lock already held */

	if (!epnum) {
#ifndef CONFIG_USB_TUSB_OMAP_DMA
		if (!is_cppi_enabled()) {
			/* endpoint 0 */
			if (is_host_active(musb))
				musb_h_ep0_irq(musb);
			else
				musb_g_ep0_irq(musb);
		}
#endif
	} else {
		/* endpoints 1..15 */
		if (transmit) {
			if (is_host_active(musb))
				musb_host_tx(musb, epnum);
			else
				musb_g_tx(musb, epnum);
		} else {
			/* receive */
			if (is_host_active(musb))
				musb_host_rx(musb, epnum);
			else
				musb_g_rx(musb, epnum);
		}
	}
}
EXPORT_SYMBOL_GPL(musb_dma_completion);

#else
#define use_dma	0
#endif
/*-------------------------------------------------------------------------*/
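
/* Sysfs "mode" attribute: reading reports the current OTG state; writing
 * "host", "peripheral" or "otg" asks the glue layer to switch roles.
 * For example (the device path depends on the platform):
 *
 *	echo host > /sys/bus/platform/devices/<musb>/mode
 */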
static ssize_t
musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct musb *musb = dev_to_musb(dev);
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&musb->lock, flags);
	ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->otg->state));
	spin_unlock_irqrestore(&musb->lock, flags);

	return ret;
}

static ssize_t
musb_mode_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t n)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;
	int		status;

	spin_lock_irqsave(&musb->lock, flags);
	if (sysfs_streq(buf, "host"))
		status = musb_platform_set_mode(musb, MUSB_HOST);
	else if (sysfs_streq(buf, "peripheral"))
		status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
	else if (sysfs_streq(buf, "otg"))
		status = musb_platform_set_mode(musb, MUSB_OTG);
	else
		status = -EINVAL;
	spin_unlock_irqrestore(&musb->lock, flags);

	return (status == 0) ? n : status;
}
static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
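
/* Sysfs "vbus" attribute: writing sets the T(a_wait_bcon) timeout in msec
 * (0 means wait forever); reading reports the VBUS state and the current
 * timeout.  For example (the device path depends on the platform):
 *
 *	echo 5000 > /sys/bus/platform/devices/<musb>/vbus
 */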
static ssize_t
musb_vbus_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t n)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;
	unsigned long	val;

	if (sscanf(buf, "%lu", &val) < 1) {
		dev_err(dev, "Invalid VBUS timeout ms value\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&musb->lock, flags);
	/* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
	musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0;
	if (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)
		musb->is_active = 0;
	musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
	spin_unlock_irqrestore(&musb->lock, flags);

	return n;
}

static ssize_t
musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;
	unsigned long	val;
	int		vbus;

	spin_lock_irqsave(&musb->lock, flags);
	val = musb->a_wait_bcon;
	/* FIXME get_vbus_status() is normally #defined as false...
	 * and is effectively TUSB-specific.
	 */
	vbus = musb_platform_get_vbus_status(musb);
	spin_unlock_irqrestore(&musb->lock, flags);

	return sprintf(buf, "Vbus %s, timeout %lu msec\n",
			vbus ? "on" : "off", val);
}
static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
/* Gadget drivers can't know that a host is connected so they might want
 * to start SRP, but users can.  This allows userspace to trigger SRP.
 */
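/* The only accepted value is 1, e.g. (device path depends on the platform):
 *
 *	echo 1 > /sys/bus/platform/devices/<musb>/srp
 */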
static ssize_t
musb_srp_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t n)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned short	srp;

	if (sscanf(buf, "%hu", &srp) != 1
			|| (srp != 1)) {
		dev_err(dev, "SRP: Value must be 1\n");
		return -EINVAL;
	}

	if (srp == 1)
		musb_g_wakeup(musb);

	return n;
}
static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);

static struct attribute *musb_attributes[] = {
	&dev_attr_mode.attr,
	&dev_attr_vbus.attr,
	&dev_attr_srp.attr,
	NULL
};

static const struct attribute_group musb_attr_group = {
	.attrs = musb_attributes,
};
/* Only used to provide driver mode change events */
static void musb_irq_work(struct work_struct *data)
{
	struct musb *musb = container_of(data, struct musb, irq_work);

	if (musb->xceiv->otg->state != musb->xceiv_old_state) {
		musb->xceiv_old_state = musb->xceiv->otg->state;
		sysfs_notify(&musb->controller->kobj, NULL, "mode");
	}
}
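
/* Recover from a babble interrupt: quiesce the controller, let the platform
 * glue reset it, drop the session bit, tell usbcore the root port is gone,
 * then redo the endpoint FIFO setup the reset wiped out before restarting
 * the session.
 */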
static void musb_recover_from_babble(struct musb *musb)
{
	int ret;
	u8 devctl;

	musb_disable_interrupts(musb);

	/*
	 * wait at least 320 cycles of 60MHz clock. That's 5.3us, we will give
	 * it some slack and wait for 10us.
	 */
	udelay(10);

	ret = musb_platform_recover(musb);
	if (ret) {
		musb_enable_interrupts(musb);
		return;
	}

	/* drop session bit */
	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	devctl &= ~MUSB_DEVCTL_SESSION;
	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);

	/* tell usbcore about it */
	musb_root_disconnect(musb);

	/*
	 * When a babble condition occurs, the musb controller
	 * removes the session bit and the endpoint config is lost.
	 */
	if (musb->dyn_fifo)
		ret = ep_config_from_table(musb);
	else
		ret = ep_config_from_hw(musb);

	/* restart session */
	if (ret == 0)
		musb_start(musb);
}
/* --------------------------------------------------------------------------
 * Init support
 */

static struct musb *allocate_instance(struct device *dev,
		struct musb_hdrc_config *config, void __iomem *mbase)
{
	struct musb		*musb;
	struct musb_hw_ep	*ep;
	int			epnum;
	int			ret;

	musb = devm_kzalloc(dev, sizeof(*musb), GFP_KERNEL);
	if (!musb)
		return NULL;

	INIT_LIST_HEAD(&musb->control);
	INIT_LIST_HEAD(&musb->in_bulk);
	INIT_LIST_HEAD(&musb->out_bulk);

	musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
	musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
	musb->mregs = mbase;
	musb->ctrl_base = mbase;
	musb->nIrq = -ENODEV;
	musb->config = config;
	BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS);
	for (epnum = 0, ep = musb->endpoints;
			epnum < musb->config->num_eps;
			epnum++, ep++) {
		ep->musb = musb;
		ep->epnum = epnum;
	}

	musb->controller = dev;

	ret = musb_host_alloc(musb);
	if (ret < 0)
		goto err_free;

	dev_set_drvdata(dev, musb);

	return musb;

err_free:
	return NULL;
}
static void musb_free(struct musb *musb)
{
	/* this has multiple entry modes. it handles fault cleanup after
	 * probe(), where things may be partially set up, as well as rmmod
	 * cleanup after everything's been de-activated.
	 */

#ifdef CONFIG_SYSFS
	sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
#endif

	if (musb->nIrq >= 0) {
		if (musb->irq_wake)
			disable_irq_wake(musb->nIrq);
		free_irq(musb->nIrq, musb);
	}

	musb_host_free(musb);
}
static void musb_deassert_reset(struct work_struct *work)
{
	struct musb *musb;
	unsigned long flags;

	musb = container_of(work, struct musb, deassert_reset_work.work);

	spin_lock_irqsave(&musb->lock, flags);

	if (musb->port1_status & USB_PORT_STAT_RESET)
		musb_port_reset(musb, false);

	spin_unlock_irqrestore(&musb->lock, flags);
}
/*
 * Perform generic per-controller initialization.
 *
 * @dev: the controller (already clocked, etc)
 * @nIrq: IRQ number
 * @ctrl: virtual address of controller registers,
 *	not yet corrected for platform-specific offsets
 */
static int
musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
{
	int			status;
	struct musb		*musb;
	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);

	/* The driver might handle more features than the board; OK.
	 * Fail when the board needs a feature that's not enabled.
	 */
	if (!plat) {
		dev_dbg(dev, "no platform_data?\n");
		status = -ENODEV;
		goto fail0;
	}

	/* allocate */
	musb = allocate_instance(dev, plat->config, ctrl);
	if (!musb) {
		status = -ENOMEM;
		goto fail0;
	}

	spin_lock_init(&musb->lock);
	musb->board_set_power = plat->set_power;
	musb->min_power = plat->min_power;
	musb->ops = plat->platform_ops;
	musb->port_mode = plat->mode;

	/*
	 * Initialize the default IO functions. At least omap2430 needs
	 * these early. We initialize the platform specific IO functions
	 * later on.
	 */
	musb_readb = musb_default_readb;
	musb_writeb = musb_default_writeb;
	musb_readw = musb_default_readw;
	musb_writew = musb_default_writew;
	musb_readl = musb_default_readl;
	musb_writel = musb_default_writel;

	/* We need musb_read/write functions initialized for PM */
	pm_runtime_use_autosuspend(musb->controller);
	pm_runtime_set_autosuspend_delay(musb->controller, 200);
	pm_runtime_irq_safe(musb->controller);
	pm_runtime_enable(musb->controller);

	/* The musb_platform_init() call:
	 *   - adjusts musb->mregs
	 *   - sets the musb->isr
	 *   - may initialize an integrated transceiver
	 *   - initializes musb->xceiv, usually by otg_get_phy()
	 *   - stops powering VBUS
	 *
	 * There are various transceiver configurations.  Blackfin,
	 * DaVinci, TUSB60x0, and others integrate them.  OMAP3 uses
	 * external/discrete ones in various flavors (twl4030 family,
	 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
	 */
	status = musb_platform_init(musb);
	if (status < 0)
		goto fail1;

	if (!musb->isr) {
		status = -ENODEV;
		goto fail2;
	}
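
	/* Pick register accessors: start from the MUSB_INDEXED_EP quirk,
	 * then let the glue layer override the ep_offset/ep_select, FIFO
	 * and plain register accessors it needs to.
	 */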
	if (musb->ops->quirks)
		musb->io.quirks = musb->ops->quirks;

	/* Most devices use indexed offset or flat offset */
	if (musb->io.quirks & MUSB_INDEXED_EP) {
		musb->io.ep_offset = musb_indexed_ep_offset;
		musb->io.ep_select = musb_indexed_ep_select;
	} else {
		musb->io.ep_offset = musb_flat_ep_offset;
		musb->io.ep_select = musb_flat_ep_select;
	}

	/* At least tusb6010 has its own offsets */
	if (musb->ops->ep_offset)
		musb->io.ep_offset = musb->ops->ep_offset;
	if (musb->ops->ep_select)
		musb->io.ep_select = musb->ops->ep_select;

	if (musb->ops->fifo_mode)
		fifo_mode = musb->ops->fifo_mode;
	else
		fifo_mode = 4;

	if (musb->ops->fifo_offset)
		musb->io.fifo_offset = musb->ops->fifo_offset;
	else
		musb->io.fifo_offset = musb_default_fifo_offset;

	if (musb->ops->readb)
		musb_readb = musb->ops->readb;
	if (musb->ops->writeb)
		musb_writeb = musb->ops->writeb;
	if (musb->ops->readw)
		musb_readw = musb->ops->readw;
	if (musb->ops->writew)
		musb_writew = musb->ops->writew;
	if (musb->ops->readl)
		musb_readl = musb->ops->readl;
	if (musb->ops->writel)
		musb_writel = musb->ops->writel;

	if (musb->ops->read_fifo)
		musb->io.read_fifo = musb->ops->read_fifo;
	else
		musb->io.read_fifo = musb_default_read_fifo;

	if (musb->ops->write_fifo)
		musb->io.write_fifo = musb->ops->write_fifo;
	else
		musb->io.write_fifo = musb_default_write_fifo;

	if (!musb->xceiv->io_ops) {
		musb->xceiv->io_dev = musb->controller;
		musb->xceiv->io_priv = musb->mregs;
		musb->xceiv->io_ops = &musb_ulpi_access;
	}
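
	/* The controller has to be clocked from here on; the DMA controller
	 * is only created when the device provides a DMA mask and DMA was
	 * not disabled with the use_dma module parameter.
	 */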
	pm_runtime_get_sync(musb->controller);

	if (use_dma && dev->dma_mask) {
		musb->dma_controller = dma_controller_create(musb, musb->mregs);
		if (IS_ERR(musb->dma_controller)) {
			status = PTR_ERR(musb->dma_controller);
			goto fail2_5;
		}
	}

	/* be sure interrupts are disabled before connecting ISR */
	musb_platform_disable(musb);
	musb_generic_disable(musb);

	/* Init IRQ workqueue before request_irq */
	INIT_WORK(&musb->irq_work, musb_irq_work);
	INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
	INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);

	/* setup musb parts of the core (especially endpoints) */
	status = musb_core_init(plat->config->multipoint
			? MUSB_CONTROLLER_MHDRC
			: MUSB_CONTROLLER_HDRC, musb);
	if (status < 0)
		goto fail3;

	setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);

	/* attach to the IRQ */
	if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
		dev_err(dev, "request_irq %d failed!\n", nIrq);
		status = -ENODEV;
		goto fail3;
	}
	musb->nIrq = nIrq;
	/* FIXME this handles wakeup irqs wrong */
	if (enable_irq_wake(nIrq) == 0) {
		musb->irq_wake = 1;
		device_init_wakeup(dev, 1);
	} else {
		musb->irq_wake = 0;
	}
	/* program PHY to use external vBus if required */
	if (plat->extvbus) {
		u8 busctl = musb_read_ulpi_buscontrol(musb->mregs);

		busctl |= MUSB_ULPI_USE_EXTVBUS;
		musb_write_ulpi_buscontrol(musb->mregs, busctl);
	}

	if (musb->xceiv->otg->default_a) {
		MUSB_HST_MODE(musb);
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
	} else {
		MUSB_DEV_MODE(musb);
		musb->xceiv->otg->state = OTG_STATE_B_IDLE;
	}

	switch (musb->port_mode) {
	case MUSB_PORT_MODE_HOST:
		status = musb_host_setup(musb, plat->power);
		if (status < 0)
			goto fail3;
		status = musb_platform_set_mode(musb, MUSB_HOST);
		break;
	case MUSB_PORT_MODE_GADGET:
		status = musb_gadget_setup(musb);
		if (status < 0)
			goto fail3;
		status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
		break;
	case MUSB_PORT_MODE_DUAL_ROLE:
		status = musb_host_setup(musb, plat->power);
		if (status < 0)
			goto fail3;
		status = musb_gadget_setup(musb);
		if (status) {
			musb_host_cleanup(musb);
			goto fail3;
		}
		status = musb_platform_set_mode(musb, MUSB_OTG);
		break;
	default:
		dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
		break;
	}

	if (status < 0)
		goto fail3;

	status = musb_init_debugfs(musb);
	if (status < 0)
		goto fail4;

	status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
	if (status)
		goto fail5;

	pm_runtime_put(musb->controller);

	return 0;
fail5:
	musb_exit_debugfs(musb);

fail4:
	musb_gadget_cleanup(musb);
	musb_host_cleanup(musb);

fail3:
	cancel_work_sync(&musb->irq_work);
	cancel_delayed_work_sync(&musb->finish_resume_work);
	cancel_delayed_work_sync(&musb->deassert_reset_work);
	if (musb->dma_controller)
		dma_controller_destroy(musb->dma_controller);

fail2_5:
	pm_runtime_put_sync(musb->controller);

fail2:
	if (musb->irq_wake)
		device_init_wakeup(dev, 0);
	musb_platform_exit(musb);

fail1:
	pm_runtime_disable(musb->controller);
	dev_err(musb->controller,
		"musb_init_controller failed with status %d\n", status);

	musb_free(musb);

fail0:
	return status;
}
/*-------------------------------------------------------------------------*/

/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
 * bridge to a platform device; this driver then suffices.
 */
static int musb_probe(struct platform_device *pdev)
{
	struct device	*dev = &pdev->dev;
	int		irq = platform_get_irq_byname(pdev, "mc");
	struct resource	*iomem;
	void __iomem	*base;

	if (irq <= 0)
		return -ENODEV;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, iomem);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return musb_init_controller(dev, irq, base);
}
static int musb_remove(struct platform_device *pdev)
{
	struct device	*dev = &pdev->dev;
	struct musb	*musb = dev_to_musb(dev);

	/* this gets called on rmmod.
	 *  - Host mode: host may still be active
	 *  - Peripheral mode: peripheral is deactivated (or never-activated)
	 *  - OTG mode: both roles are deactivated (or never-activated)
	 */
	musb_exit_debugfs(musb);
	musb_shutdown(pdev);

	if (musb->dma_controller)
		dma_controller_destroy(musb->dma_controller);

	cancel_work_sync(&musb->irq_work);
	cancel_delayed_work_sync(&musb->finish_resume_work);
	cancel_delayed_work_sync(&musb->deassert_reset_work);
	musb_free(musb);
	device_init_wakeup(dev, 0);
	return 0;
}
#ifdef	CONFIG_PM
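
/* System and runtime PM: save the (M)HDRC register file, including the
 * per-endpoint indexed registers and, on cores with dynamic FIFOs, the
 * FIFO sizing/address registers, so it can be restored after the hardware
 * loses state across suspend.
 */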
static void musb_save_context(struct musb *musb)
{
	int i;
	void __iomem *musb_base = musb->mregs;
	void __iomem *epio;

	musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
	musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
	musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
	musb->context.power = musb_readb(musb_base, MUSB_POWER);
	musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
	musb->context.index = musb_readb(musb_base, MUSB_INDEX);
	musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);

	for (i = 0; i < musb->config->num_eps; ++i) {
		struct musb_hw_ep	*hw_ep;

		hw_ep = &musb->endpoints[i];
		if (!hw_ep)
			continue;

		epio = hw_ep->regs;
		if (!epio)
			continue;

		musb_writeb(musb_base, MUSB_INDEX, i);
		musb->context.index_regs[i].txmaxp =
			musb_readw(epio, MUSB_TXMAXP);
		musb->context.index_regs[i].txcsr =
			musb_readw(epio, MUSB_TXCSR);
		musb->context.index_regs[i].rxmaxp =
			musb_readw(epio, MUSB_RXMAXP);
		musb->context.index_regs[i].rxcsr =
			musb_readw(epio, MUSB_RXCSR);

		if (musb->dyn_fifo) {
			musb->context.index_regs[i].txfifoadd =
					musb_read_txfifoadd(musb_base);
			musb->context.index_regs[i].rxfifoadd =
					musb_read_rxfifoadd(musb_base);
			musb->context.index_regs[i].txfifosz =
					musb_read_txfifosz(musb_base);
			musb->context.index_regs[i].rxfifosz =
					musb_read_rxfifosz(musb_base);
		}

		musb->context.index_regs[i].txtype =
			musb_readb(epio, MUSB_TXTYPE);
		musb->context.index_regs[i].txinterval =
			musb_readb(epio, MUSB_TXINTERVAL);
		musb->context.index_regs[i].rxtype =
			musb_readb(epio, MUSB_RXTYPE);
		musb->context.index_regs[i].rxinterval =
			musb_readb(epio, MUSB_RXINTERVAL);

		musb->context.index_regs[i].txfunaddr =
			musb_read_txfunaddr(musb_base, i);
		musb->context.index_regs[i].txhubaddr =
			musb_read_txhubaddr(musb_base, i);
		musb->context.index_regs[i].txhubport =
			musb_read_txhubport(musb_base, i);

		musb->context.index_regs[i].rxfunaddr =
			musb_read_rxfunaddr(musb_base, i);
		musb->context.index_regs[i].rxhubaddr =
			musb_read_rxhubaddr(musb_base, i);
		musb->context.index_regs[i].rxhubport =
			musb_read_rxhubport(musb_base, i);
	}
}
static void musb_restore_context(struct musb *musb)
{
	int i;
	void __iomem *musb_base = musb->mregs;
	void __iomem *ep_target_regs;
	void __iomem *epio;
	u8 power;

	musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
	musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
	musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);

	/* Don't affect SUSPENDM/RESUME bits in POWER reg */
	power = musb_readb(musb_base, MUSB_POWER);
	power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
	musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
	power |= musb->context.power;
	musb_writeb(musb_base, MUSB_POWER, power);

	musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
	musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
	musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
	musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);

	for (i = 0; i < musb->config->num_eps; ++i) {
		struct musb_hw_ep	*hw_ep;

		hw_ep = &musb->endpoints[i];
		if (!hw_ep)
			continue;

		epio = hw_ep->regs;
		if (!epio)
			continue;

		musb_writeb(musb_base, MUSB_INDEX, i);
		musb_writew(epio, MUSB_TXMAXP,
			musb->context.index_regs[i].txmaxp);
		musb_writew(epio, MUSB_TXCSR,
			musb->context.index_regs[i].txcsr);
		musb_writew(epio, MUSB_RXMAXP,
			musb->context.index_regs[i].rxmaxp);
		musb_writew(epio, MUSB_RXCSR,
			musb->context.index_regs[i].rxcsr);

		if (musb->dyn_fifo) {
			musb_write_txfifosz(musb_base,
				musb->context.index_regs[i].txfifosz);
			musb_write_rxfifosz(musb_base,
				musb->context.index_regs[i].rxfifosz);
			musb_write_txfifoadd(musb_base,
				musb->context.index_regs[i].txfifoadd);
			musb_write_rxfifoadd(musb_base,
				musb->context.index_regs[i].rxfifoadd);
		}

		musb_writeb(epio, MUSB_TXTYPE,
				musb->context.index_regs[i].txtype);
		musb_writeb(epio, MUSB_TXINTERVAL,
				musb->context.index_regs[i].txinterval);
		musb_writeb(epio, MUSB_RXTYPE,
				musb->context.index_regs[i].rxtype);
		musb_writeb(epio, MUSB_RXINTERVAL,
				musb->context.index_regs[i].rxinterval);
		musb_write_txfunaddr(musb_base, i,
				musb->context.index_regs[i].txfunaddr);
		musb_write_txhubaddr(musb_base, i,
				musb->context.index_regs[i].txhubaddr);
		musb_write_txhubport(musb_base, i,
				musb->context.index_regs[i].txhubport);

		ep_target_regs =
			musb_read_target_reg_base(i, musb_base);

		musb_write_rxfunaddr(ep_target_regs,
				musb->context.index_regs[i].rxfunaddr);
		musb_write_rxhubaddr(ep_target_regs,
				musb->context.index_regs[i].rxhubaddr);
		musb_write_rxhubport(ep_target_regs,
				musb->context.index_regs[i].rxhubport);
	}
	musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
}
static int musb_suspend(struct device *dev)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;

	spin_lock_irqsave(&musb->lock, flags);

	if (is_peripheral_active(musb)) {
		/* FIXME force disconnect unless we know USB will wake
		 * the system up quickly enough to respond ...
		 */
	} else if (is_host_active(musb)) {
		/* we know all the children are suspended; sometimes
		 * they will even be wakeup-enabled.
		 */
	}

	musb_save_context(musb);

	spin_unlock_irqrestore(&musb->lock, flags);
	return 0;
}
static int musb_resume(struct device *dev)
{
	struct musb	*musb = dev_to_musb(dev);
	u8		devctl;
	u8		mask;

	/*
	 * For static cmos like DaVinci, register values were preserved
	 * unless for some reason the whole soc powered down or the USB
	 * module got reset through the PSC (vs just being disabled).
	 *
	 * For the DSPS glue layer though, a full register restore has to
	 * be done. As it shouldn't harm other platforms, we do it
	 * unconditionally.
	 */

	musb_restore_context(musb);

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
	if ((devctl & mask) != (musb->context.devctl & mask))
		musb->port1_status = 0;
	if (musb->need_finish_resume) {
		musb->need_finish_resume = 0;
		schedule_delayed_work(&musb->finish_resume_work,
				msecs_to_jiffies(USB_RESUME_TIMEOUT));
	}

	/*
	 * The USB HUB code expects the device to be in RPM_ACTIVE once it
	 * came out of suspend
	 */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;
}
static int musb_runtime_suspend(struct device *dev)
{
	struct musb	*musb = dev_to_musb(dev);

	musb_save_context(musb);

	return 0;
}
static int musb_runtime_resume(struct device *dev)
{
	struct musb	*musb = dev_to_musb(dev);
	static int	first = 1;

	/*
	 * When pm_runtime_get_sync() is called for the first time during
	 * driver init, some of the structures used by the restore path are
	 * not initialized yet.  The clock still has to be enabled before
	 * any register access, so pm_runtime_get_sync() must be called
	 * anyway; also, restoring a context that was never saved makes no
	 * sense.  So skip the restore on that first resume only.
	 */
	if (!first)
		musb_restore_context(musb);
	first = 0;

	if (musb->need_finish_resume) {
		musb->need_finish_resume = 0;
		schedule_delayed_work(&musb->finish_resume_work,
				msecs_to_jiffies(USB_RESUME_TIMEOUT));
	}

	return 0;
}
static const struct dev_pm_ops musb_dev_pm_ops = {
	.suspend	= musb_suspend,
	.resume		= musb_resume,
	.runtime_suspend = musb_runtime_suspend,
	.runtime_resume = musb_runtime_resume,
};

#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
#else
#define	MUSB_DEV_PM_OPS	NULL
#endif

static struct platform_driver musb_driver = {
	.driver = {
		.name		= (char *)musb_driver_name,
		.bus		= &platform_bus_type,
		.pm		= MUSB_DEV_PM_OPS,
	},
	.probe		= musb_probe,
	.remove		= musb_remove,
	.shutdown	= musb_shutdown,
};

module_platform_driver(musb_driver);