/*
 * core.c - DesignWare HS OTG Controller common routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The Core code provides basic services for accessing and managing the
 * DWC_otg hardware. These services are used by both the Host Controller
 * Driver and the Peripheral Controller Driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
 * dwc2_backup_host_registers() - Backup controller host registers.
 * When suspending the USB bus, the registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup Host regs */
	hr = hsotg->hr_backup;
	if (!hr) {
		hr = devm_kzalloc(hsotg->dev, sizeof(*hr), GFP_KERNEL);
		if (!hr) {
			dev_err(hsotg->dev, "%s: can't allocate host regs\n",
				__func__);
			return -ENOMEM;
		}

		hsotg->hr_backup = hr;
	}

	hr->hcfg = readl(hsotg->regs + HCFG);
	hr->haintmsk = readl(hsotg->regs + HAINTMSK);
	for (i = 0; i < hsotg->core_params->host_channels; ++i)
		hr->hcintmsk[i] = readl(hsotg->regs + HCINTMSK(i));

	hr->hprt0 = readl(hsotg->regs + HPRT0);
	hr->hfir = readl(hsotg->regs + HFIR);

	return 0;
}

/**
 * dwc2_restore_host_registers() - Restore controller host registers.
 * When resuming the USB bus, the host registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore host regs */
	hr = hsotg->hr_backup;
	if (!hr) {
		dev_err(hsotg->dev, "%s: no host registers to restore\n",
			__func__);
		return -EINVAL;
	}

	writel(hr->hcfg, hsotg->regs + HCFG);
	writel(hr->haintmsk, hsotg->regs + HAINTMSK);

	for (i = 0; i < hsotg->core_params->host_channels; ++i)
		writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));

	writel(hr->hprt0, hsotg->regs + HPRT0);
	writel(hr->hfir, hsotg->regs + HFIR);

	return 0;
}
#else
static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif

#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
	IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
 * dwc2_backup_device_registers() - Backup controller device registers.
 * When suspending the USB bus, the registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup dev regs */
	dr = hsotg->dr_backup;
	if (!dr) {
		dr = devm_kzalloc(hsotg->dev, sizeof(*dr), GFP_KERNEL);
		if (!dr) {
			dev_err(hsotg->dev, "%s: can't allocate device regs\n",
				__func__);
			return -ENOMEM;
		}

		hsotg->dr_backup = dr;
	}

	dr->dcfg = readl(hsotg->regs + DCFG);
	dr->dctl = readl(hsotg->regs + DCTL);
	dr->daintmsk = readl(hsotg->regs + DAINTMSK);
	dr->diepmsk = readl(hsotg->regs + DIEPMSK);
	dr->doepmsk = readl(hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Backup IN EPs */
		dr->diepctl[i] = readl(hsotg->regs + DIEPCTL(i));

		/* Ensure DATA PID is correctly configured */
		if (dr->diepctl[i] & DXEPCTL_DPID)
			dr->diepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->diepctl[i] |= DXEPCTL_SETD0PID;

		dr->dieptsiz[i] = readl(hsotg->regs + DIEPTSIZ(i));
		dr->diepdma[i] = readl(hsotg->regs + DIEPDMA(i));

		/* Backup OUT EPs */
		dr->doepctl[i] = readl(hsotg->regs + DOEPCTL(i));

		/* Ensure DATA PID is correctly configured */
		if (dr->doepctl[i] & DXEPCTL_DPID)
			dr->doepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->doepctl[i] |= DXEPCTL_SETD0PID;

		dr->doeptsiz[i] = readl(hsotg->regs + DOEPTSIZ(i));
		dr->doepdma[i] = readl(hsotg->regs + DOEPDMA(i));
	}

	return 0;
}

/**
 * dwc2_restore_device_registers() - Restore controller device registers.
 * When resuming the USB bus, the device registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	u32 dctl;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore dev regs */
	dr = hsotg->dr_backup;
	if (!dr) {
		dev_err(hsotg->dev, "%s: no device registers to restore\n",
			__func__);
		return -EINVAL;
	}

	writel(dr->dcfg, hsotg->regs + DCFG);
	writel(dr->dctl, hsotg->regs + DCTL);
	writel(dr->daintmsk, hsotg->regs + DAINTMSK);
	writel(dr->diepmsk, hsotg->regs + DIEPMSK);
	writel(dr->doepmsk, hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Restore IN EPs */
		writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
		writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
		writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));

		/* Restore OUT EPs */
		writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
		writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
		writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
	}

	/* Set the Power-On Programming done bit */
	dctl = readl(hsotg->regs + DCTL);
	dctl |= DCTL_PWRONPRGDONE;
	writel(dctl, hsotg->regs + DCTL);

	return 0;
}
#else
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif

/**
 * dwc2_backup_global_registers() - Backup global controller registers.
 * When suspending the USB bus, the registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_gregs_backup *gr;
	int i;

	/* Backup global regs */
	gr = hsotg->gr_backup;
	if (!gr) {
		gr = devm_kzalloc(hsotg->dev, sizeof(*gr), GFP_KERNEL);
		if (!gr) {
			dev_err(hsotg->dev, "%s: can't allocate global regs\n",
				__func__);
			return -ENOMEM;
		}

		hsotg->gr_backup = gr;
	}

	gr->gotgctl = readl(hsotg->regs + GOTGCTL);
	gr->gintmsk = readl(hsotg->regs + GINTMSK);
	gr->gahbcfg = readl(hsotg->regs + GAHBCFG);
	gr->gusbcfg = readl(hsotg->regs + GUSBCFG);
	gr->grxfsiz = readl(hsotg->regs + GRXFSIZ);
	gr->gnptxfsiz = readl(hsotg->regs + GNPTXFSIZ);
	gr->hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
	gr->gdfifocfg = readl(hsotg->regs + GDFIFOCFG);
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		gr->dtxfsiz[i] = readl(hsotg->regs + DPTXFSIZN(i));

	return 0;
}

/**
 * dwc2_restore_global_registers() - Restore controller global registers.
 * When resuming the USB bus, the global registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_gregs_backup *gr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore global regs */
	gr = hsotg->gr_backup;
	if (!gr) {
		dev_err(hsotg->dev, "%s: no global registers to restore\n",
			__func__);
		return -EINVAL;
	}

	writel(0xffffffff, hsotg->regs + GINTSTS);
	writel(gr->gotgctl, hsotg->regs + GOTGCTL);
	writel(gr->gintmsk, hsotg->regs + GINTMSK);
	writel(gr->gusbcfg, hsotg->regs + GUSBCFG);
	writel(gr->gahbcfg, hsotg->regs + GAHBCFG);
	writel(gr->grxfsiz, hsotg->regs + GRXFSIZ);
	writel(gr->gnptxfsiz, hsotg->regs + GNPTXFSIZ);
	writel(gr->hptxfsiz, hsotg->regs + HPTXFSIZ);
	writel(gr->gdfifocfg, hsotg->regs + GDFIFOCFG);
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		writel(gr->dtxfsiz[i], hsotg->regs + DPTXFSIZN(i));

	return 0;
}

/**
 * dwc2_exit_hibernation() - Exit controller from Partial Power Down.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @restore: Controller registers need to be restored
 */
int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore)
{
	u32 pcgcctl;
	int ret = 0;

	if (!hsotg->core_params->hibernation)
		return -ENOTSUPP;

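	/*
	 * Bring the controller out of partial power down: restart the PHY
	 * clock (clear STOPPCLK), remove the power clamp, then release the
	 * reset to the power-down module before restoring any registers.
	 */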
	pcgcctl = readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_STOPPCLK;
	writel(pcgcctl, hsotg->regs + PCGCTL);

	pcgcctl = readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_PWRCLMP;
	writel(pcgcctl, hsotg->regs + PCGCTL);

	pcgcctl = readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
	writel(pcgcctl, hsotg->regs + PCGCTL);

	udelay(100);
	if (restore) {
		ret = dwc2_restore_global_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to restore registers\n",
				__func__);
			return ret;
		}

		if (dwc2_is_host_mode(hsotg)) {
			ret = dwc2_restore_host_registers(hsotg);
			if (ret) {
				dev_err(hsotg->dev, "%s: failed to restore host registers\n",
					__func__);
				return ret;
			}
		} else {
			ret = dwc2_restore_device_registers(hsotg);
			if (ret) {
				dev_err(hsotg->dev, "%s: failed to restore device registers\n",
					__func__);
				return ret;
			}
		}
	}

	return ret;
}

/**
 * dwc2_enter_hibernation() - Put controller in Partial Power Down.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
{
	u32 pcgcctl;
	int ret = 0;

	if (!hsotg->core_params->hibernation)
		return -ENOTSUPP;

	/* Backup all registers */
	ret = dwc2_backup_global_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
			__func__);
		return ret;
	}

	if (dwc2_is_host_mode(hsotg)) {
		ret = dwc2_backup_host_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to backup host registers\n",
				__func__);
			return ret;
		}
	} else {
		ret = dwc2_backup_device_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to backup device registers\n",
				__func__);
			return ret;
		}
	}

	/* Put the controller in low power state */
	pcgcctl = readl(hsotg->regs + PCGCTL);

	pcgcctl |= PCGCTL_PWRCLMP;
	writel(pcgcctl, hsotg->regs + PCGCTL);
	ndelay(20);

	pcgcctl |= PCGCTL_RSTPDWNMODULE;
	writel(pcgcctl, hsotg->regs + PCGCTL);
	ndelay(20);

	pcgcctl |= PCGCTL_STOPPCLK;
	writel(pcgcctl, hsotg->regs + PCGCTL);

	return ret;
}

/**
 * dwc2_enable_common_interrupts() - Initializes the common interrupts,
 * used in both device and host modes
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	/* Clear any pending OTG Interrupts */
	writel(0xffffffff, hsotg->regs + GOTGINT);

	/* Clear any pending interrupts */
	writel(0xffffffff, hsotg->regs + GINTSTS);

	/* Enable the interrupts in the GINTMSK */
	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;

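	/*
	 * The RxFIFO level interrupt is only needed in Slave mode, where the
	 * driver moves data out of the FIFO itself; with DMA enabled the core
	 * handles the data movement, so the interrupt stays masked.
	 */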
	if (hsotg->core_params->dma_enable <= 0)
		intmsk |= GINTSTS_RXFLVL;
	if (hsotg->core_params->external_id_pin_ctl <= 0)
		intmsk |= GINTSTS_CONIDSTSCHNG;

	intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
		  GINTSTS_SESSREQINT;

	writel(intmsk, hsotg->regs + GINTMSK);
}

/*
 * Initializes the FSLSPClkSel field of the HCFG register depending on the
 * PHY type
 */
static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, val;

	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	     hsotg->core_params->ulpi_fs_ls > 0) ||
	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* Full speed PHY */
		val = HCFG_FSLSPCLKSEL_48_MHZ;
	} else {
		/* High speed PHY running at full speed or high speed */
		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
	}

	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
	hcfg = readl(hsotg->regs + HCFG);
	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
	writel(hcfg, hsotg->regs + HCFG);
}

/*
 * Do a soft reset of the core. Be careful with this because it
 * resets all the internal state machines of the core.
 */
static int dwc2_core_reset(struct dwc2_hsotg *hsotg)
{
	u32 greset;
	int count = 0;
	u32 gusbcfg;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Wait for AHB master IDLE state */
	do {
		usleep_range(20000, 40000);
		greset = readl(hsotg->regs + GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (!(greset & GRSTCTL_AHBIDLE));

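	/*
	 * GRSTCTL_CSFTRST is self-clearing: the core drops the bit once the
	 * soft reset has completed, so poll until it reads back as zero.
	 */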
	/* Core Soft Reset */
	count = 0;
	greset |= GRSTCTL_CSFTRST;
	writel(greset, hsotg->regs + GRSTCTL);
	do {
		usleep_range(20000, 40000);
		greset = readl(hsotg->regs + GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (greset & GRSTCTL_CSFTRST);

	if (hsotg->dr_mode == USB_DR_MODE_HOST) {
		gusbcfg = readl(hsotg->regs + GUSBCFG);
		gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
		gusbcfg |= GUSBCFG_FORCEHOSTMODE;
		writel(gusbcfg, hsotg->regs + GUSBCFG);
	} else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
		gusbcfg = readl(hsotg->regs + GUSBCFG);
		gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
		gusbcfg |= GUSBCFG_FORCEDEVMODE;
		writel(gusbcfg, hsotg->regs + GUSBCFG);
	} else if (hsotg->dr_mode == USB_DR_MODE_OTG) {
		gusbcfg = readl(hsotg->regs + GUSBCFG);
		gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
		gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
		writel(gusbcfg, hsotg->regs + GUSBCFG);
	}

	/*
	 * NOTE: This long sleep is _very_ important, otherwise the core will
	 * not stay in host mode after a connector ID change!
	 */
	usleep_range(150000, 200000);

	return 0;
}

static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg, i2cctl;
	int retval = 0;

	/*
	 * core_init() is now called on every switch so only call the
	 * following for the first time through
	 */
	if (select_phy) {
		dev_dbg(hsotg->dev, "FS PHY selected\n");
		usbcfg = readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_PHYSEL;
		writel(usbcfg, hsotg->regs + GUSBCFG);

		/* Reset after a PHY select */
		retval = dwc2_core_reset(hsotg);
		if (retval) {
			dev_err(hsotg->dev, "%s() Reset failed, aborting",
				__func__);
			return retval;
		}
	}

	/*
	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
	 * do this on HNP Dev/Host mode switches (done in dev_init and
	 * host_init).
	 */
	if (dwc2_is_host_mode(hsotg))
		dwc2_init_fs_ls_pclk_sel(hsotg);

	if (hsotg->core_params->i2c_enable > 0) {
		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");

		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
		usbcfg = readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
		writel(usbcfg, hsotg->regs + GUSBCFG);

		/* Program GI2CCTL.I2CEn */
		i2cctl = readl(hsotg->regs + GI2CCTL);
		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
		i2cctl &= ~GI2CCTL_I2CEN;
		writel(i2cctl, hsotg->regs + GI2CCTL);
		i2cctl |= GI2CCTL_I2CEN;
		writel(i2cctl, hsotg->regs + GI2CCTL);
	}

	return retval;
}

static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg;
	int retval = 0;

	if (!select_phy)
		return 0;

	usbcfg = readl(hsotg->regs + GUSBCFG);

	/*
	 * HS PHY parameters. These parameters are preserved during soft reset
	 * so only program the first time. Do a soft reset immediately after
	 * setting phyif.
	 */
	switch (hsotg->core_params->phy_type) {
	case DWC2_PHY_TYPE_PARAM_ULPI:
		/* ULPI interface */
		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
		if (hsotg->core_params->phy_ulpi_ddr > 0)
			usbcfg |= GUSBCFG_DDRSEL;
		break;
	case DWC2_PHY_TYPE_PARAM_UTMI:
		/* UTMI+ interface */
		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
		if (hsotg->core_params->phy_utmi_width == 16)
			usbcfg |= GUSBCFG_PHYIF16;
		break;
	default:
		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
		break;
	}

	writel(usbcfg, hsotg->regs + GUSBCFG);

	/* Reset after setting the PHY parameters */
	retval = dwc2_core_reset(hsotg);
	if (retval) {
		dev_err(hsotg->dev, "%s() Reset failed, aborting",
			__func__);
		return retval;
	}

	return retval;
}

static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg;
	int retval = 0;

	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* If FS mode with FS PHY */
		retval = dwc2_fs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	} else {
		/* High speed PHY */
		retval = dwc2_hs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	}

	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	    hsotg->core_params->ulpi_fs_ls > 0) {
		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
		usbcfg = readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_ULPI_FS_LS;
		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
		writel(usbcfg, hsotg->regs + GUSBCFG);
	} else {
		usbcfg = readl(hsotg->regs + GUSBCFG);
		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
		writel(usbcfg, hsotg->regs + GUSBCFG);
	}

	return retval;
}

static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
{
	u32 ahbcfg = readl(hsotg->regs + GAHBCFG);

	switch (hsotg->hw_params.arch) {
	case GHWCFG2_EXT_DMA_ARCH:
		dev_err(hsotg->dev, "External DMA Mode not supported\n");
		return -EINVAL;

	case GHWCFG2_INT_DMA_ARCH:
		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
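		/*
		 * Preserve the core's control bits (GAHBCFG_CTRL_MASK) and
		 * take the remaining fields, such as the AHB burst length,
		 * from the ahbcfg core parameter.
		 */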
		if (hsotg->core_params->ahbcfg != -1) {
			ahbcfg &= GAHBCFG_CTRL_MASK;
			ahbcfg |= hsotg->core_params->ahbcfg &
				  ~GAHBCFG_CTRL_MASK;
		}
		break;

	case GHWCFG2_SLAVE_ONLY_ARCH:
	default:
		dev_dbg(hsotg->dev, "Slave Only Mode\n");
		break;
	}

	dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
		hsotg->core_params->dma_enable,
		hsotg->core_params->dma_desc_enable);

	if (hsotg->core_params->dma_enable > 0) {
		if (hsotg->core_params->dma_desc_enable > 0)
			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
		else
			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
	} else {
		dev_dbg(hsotg->dev, "Using Slave mode\n");
		hsotg->core_params->dma_desc_enable = 0;
	}

	if (hsotg->core_params->dma_enable > 0)
		ahbcfg |= GAHBCFG_DMA_EN;

	writel(ahbcfg, hsotg->regs + GAHBCFG);

	return 0;
}

static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;

	usbcfg = readl(hsotg->regs + GUSBCFG);
	usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);

	switch (hsotg->hw_params.op_mode) {
	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
		if (hsotg->core_params->otg_cap ==
				DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_HNPCAP;
		if (hsotg->core_params->otg_cap !=
				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
		if (hsotg->core_params->otg_cap !=
				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
	default:
		break;
	}

	writel(usbcfg, hsotg->regs + GUSBCFG);
}

/**
 * dwc2_core_init() - Initializes the DWC_otg controller registers and
 * prepares the core for device mode or host mode operation
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @select_phy: If true then also set the Phy type
 * @irq: If >= 0, the irq to register
 */
int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
{
	u32 usbcfg, otgctl;
	int retval;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	usbcfg = readl(hsotg->regs + GUSBCFG);

	/* Set ULPI External VBUS bit if needed */
	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
	if (hsotg->core_params->phy_ulpi_ext_vbus ==
				DWC2_PHY_ULPI_EXTERNAL_VBUS)
		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;

	/* Set external TS Dline pulsing bit if needed */
	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
	if (hsotg->core_params->ts_dline > 0)
		usbcfg |= GUSBCFG_TERMSELDLPULSE;

	writel(usbcfg, hsotg->regs + GUSBCFG);

	/* Reset the Controller */
	retval = dwc2_core_reset(hsotg);
	if (retval) {
		dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
			__func__);
		return retval;
	}

	/*
	 * This needs to happen in FS mode before any other programming occurs
	 */
	retval = dwc2_phy_init(hsotg, select_phy);
	if (retval)
		return retval;

	/* Program the GAHBCFG Register */
	retval = dwc2_gahbcfg_init(hsotg);
	if (retval)
		return retval;

	/* Program the GUSBCFG register */
	dwc2_gusbcfg_init(hsotg);

	/* Program the GOTGCTL register */
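	/* GOTGCTL_OTGVER selects OTG 2.0 behaviour when set, OTG 1.3 when clear */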
	otgctl = readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_OTGVER;
	if (hsotg->core_params->otg_ver > 0)
		otgctl |= GOTGCTL_OTGVER;
	writel(otgctl, hsotg->regs + GOTGCTL);
	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);

	/* Clear the SRP success bit for FS-I2c */
	hsotg->srp_success = 0;

	/* Enable common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/*
	 * Do device or host initialization based on mode during PCD and
	 * HCD initialization
	 */
	if (dwc2_is_host_mode(hsotg)) {
		dev_dbg(hsotg->dev, "Host Mode\n");
		hsotg->op_state = OTG_STATE_A_HOST;
	} else {
		dev_dbg(hsotg->dev, "Device Mode\n");
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
	}

	return 0;
}

/**
 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	/* Disable all interrupts */
	writel(0, hsotg->regs + GINTMSK);
	writel(0, hsotg->regs + HAINTMSK);

	/* Enable the common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/* Enable host mode interrupts without disturbing common interrupts */
	intmsk = readl(hsotg->regs + GINTMSK);
	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
	writel(intmsk, hsotg->regs + GINTMSK);
}

/**
 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk = readl(hsotg->regs + GINTMSK);

	/* Disable host mode interrupts without disturbing common interrupts */
	intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
		    GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
	writel(intmsk, hsotg->regs + GINTMSK);
}

/*
 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo sizes
 * for systems that have a total fifo depth that is smaller than the default
 * RX + TX fifo size.
 *
 * @hsotg: Programming view of DWC_otg controller
 */
static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;

	total_fifo_size = hw->total_fifo_size;
	rxfsiz = params->host_rx_fifo_size;
	nptxfsiz = params->host_nperio_tx_fifo_size;
	ptxfsiz = params->host_perio_tx_fifo_size;

	/*
	 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
	 * allocation with support for high bandwidth endpoints. Synopsys
	 * defines MPS (Max Packet Size) for a periodic EP as 1024, and for
	 * non-periodic as 512.
	 */
	if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
		/*
		 * For Buffer DMA mode/Scatter Gather DMA mode
		 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
		 * with n = number of host channels.
		 * 2 * ((1024 / 4) + 2) = 516
		 */
		rxfsiz = 516 + hw->host_channels;

		/*
		 * min non-periodic tx fifo depth
		 * 2 * (largest non-periodic USB packet used / 4)
		 * 2 * (512 / 4) = 256
		 */
		nptxfsiz = 256;

		/*
		 * min periodic tx fifo depth
		 * (largest packet size * MC) / 4
		 * (1024 * 3) / 4 = 768
		 */
		ptxfsiz = 768;

		params->host_rx_fifo_size = rxfsiz;
		params->host_nperio_tx_fifo_size = nptxfsiz;
		params->host_perio_tx_fifo_size = ptxfsiz;
	}

	/*
	 * If the summation of RX, NPTX and PTX fifo sizes is still
	 * bigger than the total_fifo_size, then we have a problem.
	 *
	 * We won't be able to allocate as many endpoints. Right now,
	 * we're just printing an error message, but ideally this FIFO
	 * allocation algorithm would be improved in the future.
	 *
	 * FIXME improve this FIFO allocation algorithm.
	 */
	if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
		dev_err(hsotg->dev, "invalid fifo sizes\n");
}

static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;

	if (!params->enable_dynamic_fifo)
		return;

	dwc2_calculate_dynamic_fifo(hsotg);

	/* Rx FIFO */
	grxfsiz = readl(hsotg->regs + GRXFSIZ);
	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
	grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
	grxfsiz |= params->host_rx_fifo_size <<
		   GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
	writel(grxfsiz, hsotg->regs + GRXFSIZ);
	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", readl(hsotg->regs + GRXFSIZ));

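	/*
	 * The host FIFOs share one FIFO RAM: the RxFIFO starts at address 0,
	 * the non-periodic TxFIFO follows it, and the periodic TxFIFO comes
	 * last, so each start address below is the sum of the depths
	 * programmed before it.
	 */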
	/* Non-periodic Tx FIFO */
	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
		readl(hsotg->regs + GNPTXFSIZ));
	nptxfsiz = params->host_nperio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	nptxfsiz |= params->host_rx_fifo_size <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
		readl(hsotg->regs + GNPTXFSIZ));

	/* Periodic Tx FIFO */
	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
		readl(hsotg->regs + HPTXFSIZ));
	hptxfsiz = params->host_perio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	hptxfsiz |= (params->host_rx_fifo_size +
		     params->host_nperio_tx_fifo_size) <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
		readl(hsotg->regs + HPTXFSIZ));

	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
		/*
		 * Global DFIFOCFG calculation for Host mode -
		 * include RxFIFO, NPTXFIFO and HPTXFIFO
		 */
		dfifocfg = readl(hsotg->regs + GDFIFOCFG);
		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
		dfifocfg |= (params->host_rx_fifo_size +
			     params->host_nperio_tx_fifo_size +
			     params->host_perio_tx_fifo_size) <<
			    GDFIFOCFG_EPINFOBASE_SHIFT &
			    GDFIFOCFG_EPINFOBASE_MASK;
		writel(dfifocfg, hsotg->regs + GDFIFOCFG);
	}
}

/**
 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
 * Host mode
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
 * request queues. Host channels are reset to ensure that they are ready for
 * performing transfers.
 */
void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, hfir, otgctl;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Restart the Phy Clock */
	writel(0, hsotg->regs + PCGCTL);

	/* Initialize Host Configuration Register */
	dwc2_init_fs_ls_pclk_sel(hsotg);
	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
		hcfg = readl(hsotg->regs + HCFG);
		hcfg |= HCFG_FSLSSUPP;
		writel(hcfg, hsotg->regs + HCFG);
	}

	/*
	 * This bit allows dynamic reloading of the HFIR register during
	 * runtime. This bit needs to be programmed during initial configuration
	 * and its value must not be changed during runtime.
	 */
	if (hsotg->core_params->reload_ctl > 0) {
		hfir = readl(hsotg->regs + HFIR);
		hfir |= HFIR_RLDCTRL;
		writel(hfir, hsotg->regs + HFIR);
	}

	if (hsotg->core_params->dma_desc_enable > 0) {
		u32 op_mode = hsotg->hw_params.op_mode;

		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
		    !hsotg->hw_params.dma_desc_enable ||
		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
			dev_err(hsotg->dev,
				"Hardware does not support descriptor DMA mode -\n");
			dev_err(hsotg->dev,
				"falling back to buffer DMA mode.\n");
			hsotg->core_params->dma_desc_enable = 0;
		} else {
			hcfg = readl(hsotg->regs + HCFG);
			hcfg |= HCFG_DESCDMA;
			writel(hcfg, hsotg->regs + HCFG);
		}
	}

	/* Configure data FIFO sizes */
	dwc2_config_fifos(hsotg);

	/* TODO - check this */
	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	writel(otgctl, hsotg->regs + GOTGCTL);

	/* Make sure the FIFOs are flushed */
	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
	dwc2_flush_rx_fifo(hsotg);

	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	writel(otgctl, hsotg->regs + GOTGCTL);

	if (hsotg->core_params->dma_desc_enable <= 0) {
		int num_channels, i;
		u32 hcchar;

		/* Flush out any leftover queued requests */
		num_channels = hsotg->core_params->host_channels;
		for (i = 0; i < num_channels; i++) {
			hcchar = readl(hsotg->regs + HCCHAR(i));
			hcchar &= ~HCCHAR_CHENA;
			hcchar |= HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			writel(hcchar, hsotg->regs + HCCHAR(i));
		}

		/* Halt all channels to put them into a known state */
		for (i = 0; i < num_channels; i++) {
			int count = 0;

			hcchar = readl(hsotg->regs + HCCHAR(i));
			hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			writel(hcchar, hsotg->regs + HCCHAR(i));
			dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
				__func__, i);
			do {
				hcchar = readl(hsotg->regs + HCCHAR(i));
				if (++count > 1000) {
					dev_err(hsotg->dev,
						"Unable to clear enable on channel %d\n",
						i);
					break;
				}
				udelay(1);
			} while (hcchar & HCCHAR_CHENA);
		}
	}

	/* Turn on the vbus power */
	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
	if (hsotg->op_state == OTG_STATE_A_HOST) {
		u32 hprt0 = dwc2_read_hprt0(hsotg);

		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
			!!(hprt0 & HPRT0_PWR));
		if (!(hprt0 & HPRT0_PWR)) {
			hprt0 |= HPRT0_PWR;
			writel(hprt0, hsotg->regs + HPRT0);
		}
	}

	dwc2_enable_host_interrupts(hsotg);
}

static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan)
{
	u32 hcintmsk = HCINTMSK_CHHLTD;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dev_vdbg(hsotg->dev, "control/bulk\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_BBLERR;
		} else {
			hcintmsk |= HCINTMSK_NAK;
			hcintmsk |= HCINTMSK_NYET;
			if (chan->do_ping)
				hcintmsk |= HCINTMSK_ACK;
		}

		if (chan->do_split) {
			hcintmsk |= HCINTMSK_NAK;
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}

		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		break;

	case USB_ENDPOINT_XFER_INT:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "intr\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_NAK;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		hcintmsk |= HCINTMSK_FRMOVRUN;

		if (chan->ep_is_in)
			hcintmsk |= HCINTMSK_BBLERR;
		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		if (chan->do_split) {
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "isoc\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_FRMOVRUN;
		hcintmsk |= HCINTMSK_ACK;

		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_XACTERR;
			hcintmsk |= HCINTMSK_BBLERR;
		}
		break;
	default:
		dev_err(hsotg->dev, "## Unknown EP type ##\n");
		break;
	}

	writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}

static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan)
{
	u32 hcintmsk = HCINTMSK_CHHLTD;

	/*
	 * For Descriptor DMA mode core halts the channel on AHB error.
	 * Interrupt is not required.
	 */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcintmsk |= HCINTMSK_AHBERR;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA enabled\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			hcintmsk |= HCINTMSK_XFERCOMPL;
	}

	if (chan->error_state && !chan->do_split &&
	    chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "setting ACK\n");
		hcintmsk |= HCINTMSK_ACK;
		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_DATATGLERR;
			if (chan->ep_type != USB_ENDPOINT_XFER_INT)
				hcintmsk |= HCINTMSK_NAK;
		}
	}

	writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}

static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan)
{
	u32 intmsk;

	if (hsotg->core_params->dma_enable > 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_hc_enable_dma_ints(hsotg, chan);
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA disabled\n");
		dwc2_hc_enable_slave_ints(hsotg, chan);
	}

	/* Enable the top level host channel interrupt */
	intmsk = readl(hsotg->regs + HAINTMSK);
	intmsk |= 1 << chan->hc_num;
	writel(intmsk, hsotg->regs + HAINTMSK);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);

	/* Make sure host channel interrupts are enabled */
	intmsk = readl(hsotg->regs + GINTMSK);
	intmsk |= GINTSTS_HCHINT;
	writel(intmsk, hsotg->regs + GINTMSK);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
}

  1115. /**
  1116. * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
  1117. * a specific endpoint
  1118. *
  1119. * @hsotg: Programming view of DWC_otg controller
  1120. * @chan: Information needed to initialize the host channel
  1121. *
  1122. * The HCCHARn register is set up with the characteristics specified in chan.
  1123. * Host channel interrupts that may need to be serviced while this transfer is
  1124. * in progress are enabled.
  1125. */
  1126. void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
  1127. {
  1128. u8 hc_num = chan->hc_num;
  1129. u32 hcintmsk;
  1130. u32 hcchar;
  1131. u32 hcsplt = 0;
  1132. if (dbg_hc(chan))
  1133. dev_vdbg(hsotg->dev, "%s()\n", __func__);
  1134. /* Clear old interrupt conditions for this host channel */
  1135. hcintmsk = 0xffffffff;
  1136. hcintmsk &= ~HCINTMSK_RESERVED14_31;
  1137. writel(hcintmsk, hsotg->regs + HCINT(hc_num));
  1138. /* Enable channel interrupts required for this transfer */
  1139. dwc2_hc_enable_ints(hsotg, chan);
  1140. /*
  1141. * Program the HCCHARn register with the endpoint characteristics for
  1142. * the current transfer
  1143. */
  1144. hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
  1145. hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
  1146. if (chan->ep_is_in)
  1147. hcchar |= HCCHAR_EPDIR;
  1148. if (chan->speed == USB_SPEED_LOW)
  1149. hcchar |= HCCHAR_LSPDDEV;
  1150. hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
  1151. hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
  1152. writel(hcchar, hsotg->regs + HCCHAR(hc_num));
  1153. if (dbg_hc(chan)) {
  1154. dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
  1155. hc_num, hcchar);
  1156. dev_vdbg(hsotg->dev, "%s: Channel %d\n",
  1157. __func__, hc_num);
  1158. dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
  1159. chan->dev_addr);
  1160. dev_vdbg(hsotg->dev, " Ep Num: %d\n",
  1161. chan->ep_num);
  1162. dev_vdbg(hsotg->dev, " Is In: %d\n",
  1163. chan->ep_is_in);
  1164. dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
  1165. chan->speed == USB_SPEED_LOW);
  1166. dev_vdbg(hsotg->dev, " Ep Type: %d\n",
  1167. chan->ep_type);
  1168. dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
  1169. chan->max_packet);
  1170. }
  1171. /* Program the HCSPLT register for SPLITs */
  1172. if (chan->do_split) {
  1173. if (dbg_hc(chan))
  1174. dev_vdbg(hsotg->dev,
  1175. "Programming HC %d with split --> %s\n",
  1176. hc_num,
  1177. chan->complete_split ? "CSPLIT" : "SSPLIT");
  1178. if (chan->complete_split)
  1179. hcsplt |= HCSPLT_COMPSPLT;
  1180. hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
  1181. HCSPLT_XACTPOS_MASK;
  1182. hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
  1183. HCSPLT_HUBADDR_MASK;
  1184. hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
  1185. HCSPLT_PRTADDR_MASK;
  1186. if (dbg_hc(chan)) {
  1187. dev_vdbg(hsotg->dev, " comp split %d\n",
  1188. chan->complete_split);
  1189. dev_vdbg(hsotg->dev, " xact pos %d\n",
  1190. chan->xact_pos);
  1191. dev_vdbg(hsotg->dev, " hub addr %d\n",
  1192. chan->hub_addr);
  1193. dev_vdbg(hsotg->dev, " hub port %d\n",
  1194. chan->hub_port);
  1195. dev_vdbg(hsotg->dev, " is_in %d\n",
  1196. chan->ep_is_in);
  1197. dev_vdbg(hsotg->dev, " Max Pkt %d\n",
  1198. chan->max_packet);
  1199. dev_vdbg(hsotg->dev, " xferlen %d\n",
  1200. chan->xfer_len);
  1201. }
  1202. }
  1203. writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
  1204. }
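
/*
 * Rough call flow for the host-channel helpers in this file, as described by
 * their kernel-doc (a sketch, not a contract enforced here): the HCD assigns
 * a transfer to a channel and calls dwc2_hc_init(), then
 * dwc2_hc_start_transfer() (or dwc2_hc_start_transfer_ddma() when descriptor
 * DMA is in use). In Slave mode, dwc2_hc_continue_transfer() queues further
 * requests from the interrupt handler. Once the Channel Halted interrupt has
 * been handled, dwc2_hc_cleanup() clears the channel state so the channel can
 * be released.
 */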

/**
 * dwc2_hc_halt() - Attempts to halt a host channel
 *
 * @hsotg: Controller register interface
 * @chan: Host channel to halt
 * @halt_status: Reason for halting the channel
 *
 * This function should only be called in Slave mode or to abort a transfer in
 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
 * controller halts the channel when the transfer is complete or a condition
 * occurs that requires application intervention.
 *
 * In Slave mode, checks for a free request queue entry, then sets the Channel
 * Enable and Channel Disable bits of the Host Channel Characteristics
 * register of the specified channel to initiate the halt. If there is no free
 * request queue entry, sets only the Channel Disable bit of the HCCHARn
 * register to flush requests for this channel. In the latter case, sets a
 * flag to indicate that the host channel needs to be halted when a request
 * queue slot is open.
 *
 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
 * HCCHARn register. The controller ensures there is space in the request
 * queue before submitting the halt request.
 *
 * Some time may elapse before the core flushes any posted requests for this
 * host channel and halts. The Channel Halted interrupt handler completes the
 * deactivation of the host channel.
 */
void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		  enum dwc2_halt_status halt_status)
{
	u32 nptxsts, hptxsts, hcchar;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);

	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
	    halt_status == DWC2_HC_XFER_AHB_ERR) {
		/*
		 * Disable all channel interrupts except Ch Halted. The QTD
		 * and QH state associated with this transfer has been cleared
		 * (in the case of URB_DEQUEUE), so the channel needs to be
		 * shut down carefully to prevent crashes.
		 */
		u32 hcintmsk = HCINTMSK_CHHLTD;

		dev_vdbg(hsotg->dev, "dequeue/error\n");
		writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));

		/*
		 * Make sure no other interrupts besides halt are currently
		 * pending. Handling another interrupt could cause a crash due
		 * to the QTD and QH state.
		 */
		writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));

		/*
		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
		 * even if the channel was already halted for some other
		 * reason
		 */
		chan->halt_status = halt_status;

		hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
		if (!(hcchar & HCCHAR_CHENA)) {
			/*
			 * The channel is either already halted or it hasn't
			 * started yet. In DMA mode, the transfer may halt if
			 * it finishes normally or a condition occurs that
			 * requires driver intervention. Don't want to halt
			 * the channel again. In either Slave or DMA mode,
			 * it's possible that the transfer has been assigned
			 * to a channel, but not started yet when an URB is
			 * dequeued. Don't want to halt a channel that hasn't
			 * started yet.
			 */
			return;
		}
	}

	if (chan->halt_pending) {
		/*
		 * A halt has already been issued for this channel. This might
		 * happen when a transfer is aborted by a higher level in
		 * the stack.
		 */
		dev_vdbg(hsotg->dev,
			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
			 __func__, chan->hc_num);
		return;
	}

	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));

	/* No need to set the bit in DDMA for disabling the channel */
	/* TODO check it everywhere channel is disabled */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcchar |= HCCHAR_CHENA;
	} else {
		if (dbg_hc(chan))
			dev_dbg(hsotg->dev, "desc DMA enabled\n");
	}
	hcchar |= HCCHAR_CHDIS;

	if (hsotg->core_params->dma_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA not enabled\n");
		hcchar |= HCCHAR_CHENA;

		/* Check for space in the request queue to issue the halt */
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			nptxsts = readl(hsotg->regs + GNPTXSTS);
			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
				dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		} else {
			if (dbg_perio())
				dev_vdbg(hsotg->dev, "isoc/intr\n");
			hptxsts = readl(hsotg->regs + HPTXSTS);
			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
			    hsotg->queuing_high_bandwidth) {
				if (dbg_perio())
					dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		}
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
	}

	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	chan->halt_status = halt_status;

	if (hcchar & HCCHAR_CHENA) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel enabled\n");
		chan->halt_pending = 1;
		chan->halt_on_queue = 0;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel disabled\n");
		chan->halt_on_queue = 1;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
			 hcchar);
		dev_vdbg(hsotg->dev, " halt_pending: %d\n",
			 chan->halt_pending);
		dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
			 chan->halt_on_queue);
		dev_vdbg(hsotg->dev, " halt_status: %d\n",
			 chan->halt_status);
	}
}

/**
 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Identifies the host channel to clean up
 *
 * This function is normally called after a transfer is done and the host
 * channel is being released
 */
void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u32 hcintmsk;

	chan->xfer_started = 0;

	/*
	 * Clear channel interrupt enables and any unhandled channel interrupt
	 * conditions
	 */
	writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
	hcintmsk = 0xffffffff;
	hcintmsk &= ~HCINTMSK_RESERVED14_31;
	writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
}

/**
 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
 * which frame a periodic transfer should occur
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Identifies the host channel to set up and its properties
 * @hcchar: Current value of the HCCHAR register for the specified host channel
 *
 * This function has no effect on non-periodic transfers
 */
static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, u32 *hcchar)
{
	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* 1 if _next_ frame is odd, 0 if it's even */
		if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
			*hcchar |= HCCHAR_ODDFRM;
	}
}
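
/*
 * Picks the starting data PID for the transfer from the endpoint direction
 * and the number of transactions per (micro)frame. For high-speed,
 * high-bandwidth endpoints this follows the USB 2.0 rules: IN endpoints
 * start with DATA0/DATA1/DATA2 for 1/2/3 transactions per microframe, and
 * OUT endpoints start with MDATA when more than one transaction is
 * scheduled. Everything else simply starts with DATA0.
 */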
static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
{
	/* Set up the initial PID for the transfer */
	if (chan->speed == USB_SPEED_HIGH) {
		if (chan->ep_is_in) {
			if (chan->multi_count == 1)
				chan->data_pid_start = DWC2_HC_PID_DATA0;
			else if (chan->multi_count == 2)
				chan->data_pid_start = DWC2_HC_PID_DATA1;
			else
				chan->data_pid_start = DWC2_HC_PID_DATA2;
		} else {
			if (chan->multi_count == 1)
				chan->data_pid_start = DWC2_HC_PID_DATA0;
			else
				chan->data_pid_start = DWC2_HC_PID_MDATA;
		}
	} else {
		chan->data_pid_start = DWC2_HC_PID_DATA0;
	}
}

/**
 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
 * the Host Channel
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * This function should only be called in Slave mode. For a channel associated
 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
 * associated with a periodic EP, the periodic Tx FIFO is written.
 *
 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
 * the number of bytes written to the Tx FIFO.
 */
static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 i;
	u32 remaining_count;
	u32 byte_count;
	u32 dword_count;
	u32 __iomem *data_fifo;
	u32 *data_buf = (u32 *)chan->xfer_buf;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));

	remaining_count = chan->xfer_len - chan->xfer_count;
	if (remaining_count > chan->max_packet)
		byte_count = chan->max_packet;
	else
		byte_count = remaining_count;

	dword_count = (byte_count + 3) / 4;
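
	/*
	 * Data is pushed to the FIFO a whole 32-bit word at a time, so the
	 * byte count was rounded up above; the transfer size programmed in
	 * HCTSIZ determines how many of those bytes are actually sent.
	 */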
	if (((unsigned long)data_buf & 0x3) == 0) {
		/* xfer_buf is DWORD aligned */
		for (i = 0; i < dword_count; i++, data_buf++)
			writel(*data_buf, data_fifo);
	} else {
		/* xfer_buf is not DWORD aligned */
		for (i = 0; i < dword_count; i++, data_buf++) {
			u32 data = data_buf[0] | data_buf[1] << 8 |
				   data_buf[2] << 16 | data_buf[3] << 24;
			writel(data, data_fifo);
		}
	}

	chan->xfer_count += byte_count;
	chan->xfer_buf += byte_count;
}

/**
 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
 * channel and starts the transfer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel. The xfer_len value
 *        may be reduced to accommodate the max widths of the XferSize and
 *        PktCnt fields in the HCTSIZn register. The multi_count value may be
 *        changed to reflect the final xfer_len value.
 *
 * This function may be called in either Slave mode or DMA mode. In Slave mode,
 * the caller must ensure that there is sufficient space in the request queue
 * and Tx Data FIFO.
 *
 * For an OUT transfer in Slave mode, it loads a data packet into the
 * appropriate FIFO. If necessary, additional data packets are loaded in the
 * Host ISR.
 *
 * For an IN transfer in Slave mode, a data packet is requested. The data
 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
 * additional data packets are requested in the Host ISR.
 *
 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
 * register along with a packet count of 1 and the channel is enabled. This
 * causes a single PING transaction to occur. Other fields in HCTSIZ are
 * simply set to 0 since no data transfer occurs in this case.
 *
 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
 * all the information required to perform the subsequent data transfer. In
 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
 * controller performs the entire PING protocol, then starts the data
 * transfer.
 */
void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
			    struct dwc2_host_chan *chan)
{
	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
	u32 hcchar;
	u32 hctsiz = 0;
	u16 num_packets;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (chan->do_ping) {
		if (hsotg->core_params->dma_enable <= 0) {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, no DMA\n");
			dwc2_hc_do_ping(hsotg, chan);
			chan->xfer_started = 1;
			return;
		} else {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, DMA\n");
			hctsiz |= TSIZ_DOPNG;
		}
	}

	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "split\n");
		num_packets = 1;

		if (chan->complete_split && !chan->ep_is_in)
			/*
			 * For CSPLIT OUT Transfer, set the size to 0 so the
			 * core doesn't expect any data written to the FIFO
			 */
			chan->xfer_len = 0;
		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
			chan->xfer_len = chan->max_packet;
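		/*
		 * 188 bytes is about the most full-/low-speed data that fits
		 * on the downstream bus in one microframe, so a start-split
		 * OUT is never asked to carry more than that.
		 */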
		else if (!chan->ep_is_in && chan->xfer_len > 188)
			chan->xfer_len = 188;

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "no split\n");
		/*
		 * Ensure that the transfer length and packet count will fit
		 * in the widths allocated for them in the HCTSIZn register
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			/*
			 * Make sure the transfer size is no larger than one
			 * (micro)frame's worth of data. (A check was done
			 * when the periodic transfer was accepted to ensure
			 * that a (micro)frame's worth of data can be
			 * programmed into a channel.)
			 */
			u32 max_periodic_len =
				chan->multi_count * chan->max_packet;

			if (chan->xfer_len > max_periodic_len)
				chan->xfer_len = max_periodic_len;
		} else if (chan->xfer_len > max_hc_xfer_size) {
			/*
			 * Make sure that xfer_len is a multiple of max packet
			 * size
			 */
			chan->xfer_len =
				max_hc_xfer_size - chan->max_packet + 1;
		}

		if (chan->xfer_len > 0) {
			num_packets = (chan->xfer_len + chan->max_packet - 1) /
					chan->max_packet;
			if (num_packets > max_hc_pkt_count) {
				num_packets = max_hc_pkt_count;
				chan->xfer_len = num_packets * chan->max_packet;
			}
		} else {
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;
		}

		if (chan->ep_is_in)
			/*
			 * Always program an integral # of max packets for IN
			 * transfers
			 */
			chan->xfer_len = num_packets * chan->max_packet;

		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Make sure that the multi_count field matches the
			 * actual transfer length
			 */
			chan->multi_count = num_packets;

		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			dwc2_set_pid_isoc(chan);

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;
	}
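
	/*
	 * Worked example (illustrative only, assuming the hardware max
	 * transfer size is large enough): a 1000-byte non-split bulk OUT with
	 * a 512-byte max packet reaches this point with xfer_len = 1000 and
	 * num_packets = 2, so HCTSIZ is programmed with XferSize = 1000,
	 * PktCnt = 2 and the starting PID from chan->data_pid_start.
	 */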
	chan->start_pkt_count = num_packets;
	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;
	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
			 hctsiz, chan->hc_num);
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
			 (hctsiz & TSIZ_XFERSIZE_MASK) >>
			 TSIZ_XFERSIZE_SHIFT);
		dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
			 (hctsiz & TSIZ_PKTCNT_MASK) >>
			 TSIZ_PKTCNT_SHIFT);
		dev_vdbg(hsotg->dev, " Start PID: %d\n",
			 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
			 TSIZ_SC_MC_PID_SHIFT);
	}

	if (hsotg->core_params->dma_enable > 0) {
		dma_addr_t dma_addr;

		if (chan->align_buf) {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "align_buf\n");
			dma_addr = chan->align_buf;
		} else {
			dma_addr = chan->xfer_dma;
		}
		writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
				 (unsigned long)dma_addr, chan->hc_num);
	}

	/* Start the split */
	if (chan->do_split) {
		u32 hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num));

		hcsplt |= HCSPLT_SPLTENA;
		writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
	}

	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;
	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;

	if (hsotg->core_params->dma_enable <= 0 &&
	    !chan->ep_is_in && chan->xfer_len > 0)
		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
}

/**
 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
 * host channel and starts the transfer in Descriptor DMA mode
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
 * with micro-frame bitmap.
 *
 * Initializes HCDMA register with descriptor list address and CTD value then
 * starts the transfer via enabling the channel.
 */
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hc_dma;
	u32 hctsiz = 0;

	if (chan->do_ping)
		hctsiz |= TSIZ_DOPNG;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		dwc2_set_pid_isoc(chan);

	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;

	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;

	/* Non-zero only for high-speed interrupt endpoints */
	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, " Start PID: %d\n",
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
	}

	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));

	hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;

	/* Always start from first descriptor */
	hc_dma &= ~HCDMA_CTD_MASK;
	writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
			 hc_dma, chan->hc_num);

	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;
}

/**
 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
 * a previous call to dwc2_hc_start_transfer()
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * The caller must ensure there is sufficient space in the request queue and Tx
 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
 * the controller acts autonomously to complete transfers programmed to a host
 * channel.
 *
 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
 * if there is any data remaining to be queued. For an IN transfer, another
 * data packet is always requested. For the SETUP phase of a control transfer,
 * this function does nothing.
 *
 * Return: 1 if a new request is queued, 0 if no more requests are required
 * for this transfer
 */
int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	if (chan->do_split)
		/* SPLITs always queue just once per channel */
		return 0;

	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
		/* SETUPs are queued only once since they can't be NAK'd */
		return 0;

	if (chan->ep_is_in) {
		/*
		 * Always queue another request for other IN transfers. If
		 * back-to-back INs are issued and NAKs are received for both,
		 * the driver may still be processing the first NAK when the
		 * second NAK is received. When the interrupt handler clears
		 * the NAK interrupt for the first NAK, the second NAK will
		 * not be seen. So we can't depend on the NAK interrupt
		 * handler to requeue a NAK'd request. Instead, IN requests
		 * are issued each time this function is called. When the
		 * transfer completes, the extra requests for the channel will
		 * be flushed.
		 */
		u32 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));

		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
		hcchar |= HCCHAR_CHENA;
		hcchar &= ~HCCHAR_CHDIS;
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
				 hcchar);
		writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
		chan->requests++;
		return 1;
	}

	/* OUT transfers */
	if (chan->xfer_count < chan->xfer_len) {
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			u32 hcchar = readl(hsotg->regs +
					   HCCHAR(chan->hc_num));

			dwc2_hc_set_even_odd_frame(hsotg, chan,
						   &hcchar);
		}

		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
		chan->requests++;
		return 1;
	}

	return 0;
}

/**
 * dwc2_hc_do_ping() - Starts a PING transfer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * This function should only be called in Slave mode. The Do Ping bit is set in
 * the HCTSIZ register, then the channel is enabled.
 */
void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hctsiz;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	hctsiz = TSIZ_DOPNG;
	hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));

	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;
	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
}

/**
 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
 * the HFIR register according to PHY type and speed
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * NOTE: The caller can modify the value of the HFIR register only after the
 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
 * has been set
 */
u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;
	u32 hprt0;
	int clock = 60;	/* default value */

	usbcfg = readl(hsotg->regs + GUSBCFG);
	hprt0 = readl(hsotg->regs + HPRT0);

	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
	    !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
		clock = 48;
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 30;
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 48;
	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
		clock = 48;
	if ((usbcfg & GUSBCFG_PHYSEL) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
		clock = 48;
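
	/*
	 * HFIR holds the frame interval in PHY clock cycles: a 125 us
	 * microframe at high speed, a 1 ms frame at full/low speed. With the
	 * PHY clock in MHz that works out to 125 * clock or 1000 * clock
	 * cycles (e.g. a 60 MHz clock at high speed gives 7500 cycles).
	 */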
	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
		/* High speed case */
		return 125 * clock;
	else
		/* FS/LS case */
		return 1000 * clock;
}

/**
 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
 * buffer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @dest: Destination buffer for the packet
 * @bytes: Number of bytes to copy to the destination
 */
void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
{
	u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
	u32 *data_buf = (u32 *)dest;
	int word_count = (bytes + 3) / 4;
	int i;

	/*
	 * Todo: Account for the case where dest is not dword aligned. This
	 * requires reading data from the FIFO into a u32 temp buffer, then
	 * moving it into the data buffer.
	 */

	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);

	for (i = 0; i < word_count; i++, data_buf++)
		*data_buf = readl(fifo);
}

/**
 * dwc2_dump_host_registers() - Prints the host registers
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
	u32 __iomem *addr;
	int i;

	dev_dbg(hsotg->dev, "Host Global Registers\n");
	addr = hsotg->regs + HCFG;
	dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HFIR;
	dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HFNUM;
	dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HPTXSTS;
	dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HAINT;
	dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HAINTMSK;
	dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	if (hsotg->core_params->dma_desc_enable > 0) {
		addr = hsotg->regs + HFLBADDR;
		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
	}

	addr = hsotg->regs + HPRT0;
	dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));

	for (i = 0; i < hsotg->core_params->host_channels; i++) {
		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
		addr = hsotg->regs + HCCHAR(i);
		dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCSPLT(i);
		dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCINT(i);
		dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCINTMSK(i);
		dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCTSIZ(i);
		dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCDMA(i);
		dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		if (hsotg->core_params->dma_desc_enable > 0) {
			addr = hsotg->regs + HCDMAB(i);
			dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
				(unsigned long)addr, readl(addr));
		}
	}
#endif
}

/**
 * dwc2_dump_global_registers() - Prints the core global registers
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
	u32 __iomem *addr;

	dev_dbg(hsotg->dev, "Core Global Registers\n");
	addr = hsotg->regs + GOTGCTL;
	dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GOTGINT;
	dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GAHBCFG;
	dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GUSBCFG;
	dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GRSTCTL;
	dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GINTSTS;
	dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GINTMSK;
	dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GRXSTSR;
	dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GRXFSIZ;
	dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GNPTXFSIZ;
	dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GNPTXSTS;
	dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GI2CCTL;
	dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GPVNDCTL;
	dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GGPIO;
	dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GUID;
	dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GSNPSID;
	dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG1;
	dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG2;
	dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG3;
	dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG4;
	dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GLPMCFG;
	dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GPWRDN;
	dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GDFIFOCFG;
	dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HPTXFSIZ;
	dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + PCGCTL;
	dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
#endif
}

/**
 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
 *
 * @hsotg: Programming view of DWC_otg controller
 * @num: Tx FIFO to flush
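 *       (0x10 is generally understood to select all Tx FIFOs)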
 */
void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
{
	u32 greset;
	int count = 0;

	dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);

	greset = GRSTCTL_TXFFLSH;
	greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
	writel(greset, hsotg->regs + GRSTCTL);

	do {
		greset = readl(hsotg->regs + GRSTCTL);
		if (++count > 10000) {
			dev_warn(hsotg->dev,
				 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
				 __func__, greset,
				 readl(hsotg->regs + GNPTXSTS));
			break;
		}
		udelay(1);
	} while (greset & GRSTCTL_TXFFLSH);

	/* Wait for at least 3 PHY Clocks */
	udelay(1);
}

/**
 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
{
	u32 greset;
	int count = 0;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	greset = GRSTCTL_RXFFLSH;
	writel(greset, hsotg->regs + GRSTCTL);

	do {
		greset = readl(hsotg->regs + GRSTCTL);
		if (++count > 10000) {
			dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
				 __func__, greset);
			break;
		}
		udelay(1);
	} while (greset & GRSTCTL_RXFFLSH);

	/* Wait for at least 3 PHY Clocks */
	udelay(1);
}

#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))

/* Parameter access functions */
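/*
 * Note on the setters below: a negative value (typically -1) means the
 * platform did not specify the parameter, so the setter silently falls back
 * to a default derived from the hardware configuration; only values >= 0
 * that conflict with the hardware produce an error message.
 */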

void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	switch (val) {
	case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
		if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
			valid = 0;
		break;
	case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
		switch (hsotg->hw_params.op_mode) {
		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
			break;
		default:
			valid = 0;
			break;
		}
		break;
	case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
		/* always valid */
		break;
	default:
		valid = 0;
		break;
	}

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for otg_cap parameter. Check HW configuration.\n",
				val);
		switch (hsotg->hw_params.op_mode) {
		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
			val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
			break;
		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
			val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
			break;
		default:
			val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
			break;
		}
		dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
	}

	hsotg->core_params->otg_cap = val;
}

void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
		valid = 0;
	if (val < 0)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for dma_enable parameter. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
		dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
	}

	hsotg->core_params->dma_enable = val;
}

void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
			!hsotg->hw_params.dma_desc_enable))
		valid = 0;
	if (val < 0)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
				val);
		val = (hsotg->core_params->dma_enable > 0 &&
			hsotg->hw_params.dma_desc_enable);
		dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
	}

	hsotg->core_params->dma_desc_enable = val;
}

void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
						 int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"Wrong value for host_support_fs_low_power\n");
			dev_err(hsotg->dev,
				"host_support_fs_low_power must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev,
			"Setting host_support_fs_low_power to %d\n", val);
	}

	hsotg->core_params->host_support_fs_ls_low_power = val;
}

void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
		valid = 0;
	if (val < 0)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.enable_dynamic_fifo;
		dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
	}

	hsotg->core_params->enable_dynamic_fifo = val;
}

void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_rx_fifo_size. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.host_rx_fifo_size;
		dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
	}

	hsotg->core_params->host_rx_fifo_size = val;
}

void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.host_nperio_tx_fifo_size;
		dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
			val);
	}

	hsotg->core_params->host_nperio_tx_fifo_size = val;
}

void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.host_perio_tx_fifo_size;
		dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
			val);
	}

	hsotg->core_params->host_perio_tx_fifo_size = val;
}

void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for max_transfer_size. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.max_transfer_size;
		dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
	}

	hsotg->core_params->max_transfer_size = val;
}

void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 15 || val > hsotg->hw_params.max_packet_count)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for max_packet_count. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.max_packet_count;
		dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
	}

	hsotg->core_params->max_packet_count = val;
}

void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 1 || val > hsotg->hw_params.host_channels)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_channels. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.host_channels;
		dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
	}

	hsotg->core_params->host_channels = val;
}

void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 0;
	u32 hs_phy_type, fs_phy_type;

	if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
			       DWC2_PHY_TYPE_PARAM_ULPI)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for phy_type\n");
			dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
		}
		valid = 0;
	}

	hs_phy_type = hsotg->hw_params.hs_phy_type;
	fs_phy_type = hsotg->hw_params.fs_phy_type;
	if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
	    (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
	     hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
		valid = 1;
	else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
		 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
		  hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
		valid = 1;
	else if (val == DWC2_PHY_TYPE_PARAM_FS &&
		 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
		valid = 1;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for phy_type. Check HW configuration.\n",
				val);
		val = DWC2_PHY_TYPE_PARAM_FS;
		if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
			if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
			    hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
				val = DWC2_PHY_TYPE_PARAM_UTMI;
			else
				val = DWC2_PHY_TYPE_PARAM_ULPI;
		}
		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
	}

	hsotg->core_params->phy_type = val;
}

static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
{
	return hsotg->core_params->phy_type;
}

void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for speed parameter\n");
			dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n");
		}
		valid = 0;
	}

	if (val == DWC2_SPEED_PARAM_HIGH &&
	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for speed parameter. Check HW configuration.\n",
				val);
		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
				DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
		dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
	}

	hsotg->core_params->speed = val;
}

void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
			       DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"Wrong value for host_ls_low_power_phy_clk parameter\n");
			dev_err(hsotg->dev,
				"host_ls_low_power_phy_clk must be 0 or 1\n");
		}
		valid = 0;
	}

	if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
				val);
		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
			? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
			: DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
		dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
			val);
	}

	hsotg->core_params->host_ls_low_power_phy_clk = val;
}

void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
			dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val);
	}

	hsotg->core_params->phy_ulpi_ddr = val;
}

void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"Wrong value for phy_ulpi_ext_vbus\n");
			dev_err(hsotg->dev,
				"phy_ulpi_ext_vbus must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
	}

	hsotg->core_params->phy_ulpi_ext_vbus = val;
}

void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 0;

	switch (hsotg->hw_params.utmi_phy_data_width) {
	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
		valid = (val == 8);
		break;
	case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
		valid = (val == 16);
		break;
	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
		valid = (val == 8 || val == 16);
		break;
	}

	if (!valid) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"%d invalid for phy_utmi_width. Check HW configuration.\n",
				val);
		}
		val = (hsotg->hw_params.utmi_phy_data_width ==
		       GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
		dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
	}

	hsotg->core_params->phy_utmi_width = val;
}

void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
			dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
	}

	hsotg->core_params->ulpi_fs_ls = val;
}

void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for ts_dline\n");
			dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
	}

	hsotg->core_params->ts_dline = val;
}

void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
			dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
		}
		valid = 0;
	}

	if (val == 1 && !(hsotg->hw_params.i2c_enable))
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for i2c_enable. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.i2c_enable;
		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
	}

	hsotg->core_params->i2c_enable = val;
}

void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"Wrong value for en_multiple_tx_fifo,\n");
			dev_err(hsotg->dev,
				"en_multiple_tx_fifo must be 0 or 1\n");
		}
		valid = 0;
	}

	if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.en_multiple_tx_fifo;
		dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
	}

	hsotg->core_params->en_multiple_tx_fifo = val;
}

void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter reload_ctl\n", val);
			dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
		}
		valid = 0;
	}

	if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for parameter reload_ctl. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
		dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
	}

	hsotg->core_params->reload_ctl = val;
}

void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
{
	if (val != -1)
		hsotg->core_params->ahbcfg = val;
	else
		hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
						GAHBCFG_HBSTLEN_SHIFT;
}

void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter otg_ver\n", val);
			dev_err(hsotg->dev,
				"otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
	}

	hsotg->core_params->otg_ver = val;
}

static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter uframe_sched\n",
				val);
			dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
		}
		val = 1;
		dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
	}

	hsotg->core_params->uframe_sched = val;
}

static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
					       int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter external_id_pin_ctl\n",
				val);
			dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
	}

	hsotg->core_params->external_id_pin_ctl = val;
}

static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
				       int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter hibernation\n",
				val);
			dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
	}

	hsotg->core_params->hibernation = val;
}

/*
 * This function is called during module initialization to pass module
 * parameters for the DWC_otg core.
 */
void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
                         const struct dwc2_core_params *params)
{
        dev_dbg(hsotg->dev, "%s()\n", __func__);

        dwc2_set_param_otg_cap(hsotg, params->otg_cap);
        dwc2_set_param_dma_enable(hsotg, params->dma_enable);
        dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
        dwc2_set_param_host_support_fs_ls_low_power(hsotg,
                        params->host_support_fs_ls_low_power);
        dwc2_set_param_enable_dynamic_fifo(hsotg,
                        params->enable_dynamic_fifo);
        dwc2_set_param_host_rx_fifo_size(hsotg,
                        params->host_rx_fifo_size);
        dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
                        params->host_nperio_tx_fifo_size);
        dwc2_set_param_host_perio_tx_fifo_size(hsotg,
                        params->host_perio_tx_fifo_size);
        dwc2_set_param_max_transfer_size(hsotg,
                        params->max_transfer_size);
        dwc2_set_param_max_packet_count(hsotg,
                        params->max_packet_count);
        dwc2_set_param_host_channels(hsotg, params->host_channels);
        dwc2_set_param_phy_type(hsotg, params->phy_type);
        dwc2_set_param_speed(hsotg, params->speed);
        dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
                        params->host_ls_low_power_phy_clk);
        dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
        dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
                        params->phy_ulpi_ext_vbus);
        dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
        dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
        dwc2_set_param_ts_dline(hsotg, params->ts_dline);
        dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
        dwc2_set_param_en_multiple_tx_fifo(hsotg,
                        params->en_multiple_tx_fifo);
        dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
        dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
        dwc2_set_param_otg_ver(hsotg, params->otg_ver);
        dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
        dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
        dwc2_set_param_hibernation(hsotg, params->hibernation);
}
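
/*
 * Illustrative sketch only, not part of this file: a platform glue driver
 * would typically fill in a dwc2_core_params at probe time and hand it to
 * dwc2_set_parameters(). The function name and the specific field values
 * below are assumptions for a generic host setup, not taken from any real
 * board code; fields left at -1 fall back to the driver defaults.
 */
#if 0
static void dwc2_example_apply_params(struct dwc2_hsotg *hsotg)
{
        struct dwc2_core_params params;

        /* -1 in every field means "not set, use the default" */
        dwc2_set_all_params(&params, -1);

        params.otg_cap = 2;             /* assumed: no HNP/SRP capable */
        params.dma_enable = 1;          /* assumed: use internal DMA */

        dwc2_set_parameters(hsotg, &params);
}
#endif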

/**
 * During device initialization, read various hardware configuration
 * registers and interpret the contents.
 */
int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
{
        struct dwc2_hw_params *hw = &hsotg->hw_params;
        unsigned width;
        u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
        u32 hptxfsiz, grxfsiz, gnptxfsiz;
        u32 gusbcfg;

        /*
         * Attempt to ensure this device is really a DWC_otg Controller.
         * Read and verify the GSNPSID register contents. The value should be
         * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or
         * "OT3", as in "OTG version 2.xx" or "OTG version 3.xx".
         */
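        /*
         * For example, a GSNPSID of 0x4f54271a identifies an OTG 2.xx core
         * ("OT2") at release 2.71a; the low 16 bits are decoded digit by
         * digit in the dev_dbg() below.
         */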
        hw->snpsid = readl(hsotg->regs + GSNPSID);
        if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
            (hw->snpsid & 0xfffff000) != 0x4f543000) {
                dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
                        hw->snpsid);
                return -ENODEV;
        }

        dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
                hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
                hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);

        hwcfg1 = readl(hsotg->regs + GHWCFG1);
        hwcfg2 = readl(hsotg->regs + GHWCFG2);
        hwcfg3 = readl(hsotg->regs + GHWCFG3);
        hwcfg4 = readl(hsotg->regs + GHWCFG4);
        grxfsiz = readl(hsotg->regs + GRXFSIZ);

        dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
        dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
        dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
        dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
        dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);

        /* Force host mode to get HPTXFSIZ / GNPTXFSIZ exact power on value */
        gusbcfg = readl(hsotg->regs + GUSBCFG);
        gusbcfg |= GUSBCFG_FORCEHOSTMODE;
        writel(gusbcfg, hsotg->regs + GUSBCFG);
        usleep_range(100000, 150000);

        gnptxfsiz = readl(hsotg->regs + GNPTXFSIZ);
        hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
        dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
        dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
        gusbcfg = readl(hsotg->regs + GUSBCFG);
        gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
        writel(gusbcfg, hsotg->regs + GUSBCFG);
        usleep_range(100000, 150000);

        /* hwcfg2 */
        hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
                      GHWCFG2_OP_MODE_SHIFT;
        hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
                   GHWCFG2_ARCHITECTURE_SHIFT;
        hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
        hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
                                 GHWCFG2_NUM_HOST_CHAN_SHIFT);
        hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
                          GHWCFG2_HS_PHY_TYPE_SHIFT;
        hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
                          GHWCFG2_FS_PHY_TYPE_SHIFT;
        hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
                         GHWCFG2_NUM_DEV_EP_SHIFT;
        hw->nperio_tx_q_depth =
                (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
                GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
        hw->host_perio_tx_q_depth =
                (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
                GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
        hw->dev_token_q_depth =
                (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
                GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;

        /* hwcfg3 */
        width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
                GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
        hw->max_transfer_size = (1 << (width + 11)) - 1;
        /*
         * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
         * coherent buffers with this size, and if it's too large we can
         * exhaust the coherent DMA pool.
         */
        if (hw->max_transfer_size > 65535)
                hw->max_transfer_size = 65535;
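        /*
         * For example, a width field of 5 encodes a 16-bit transfer size
         * counter, i.e. an initial max_transfer_size of 65535; anything
         * wider is clipped by the check above.
         */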
        width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
                GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
        hw->max_packet_count = (1 << (width + 4)) - 1;
        hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
        hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
                              GHWCFG3_DFIFO_DEPTH_SHIFT;

        /* hwcfg4 */
        hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
        hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
                                  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
        hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
        hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
        hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
                                  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;

        /* fifo sizes */
        hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
                                GRXFSIZ_DEPTH_SHIFT;
        hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
                                       FIFOSIZE_DEPTH_SHIFT;
        hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
                                      FIFOSIZE_DEPTH_SHIFT;

        dev_dbg(hsotg->dev, "Detected values from hardware:\n");
        dev_dbg(hsotg->dev, " op_mode=%d\n",
                hw->op_mode);
        dev_dbg(hsotg->dev, " arch=%d\n",
                hw->arch);
        dev_dbg(hsotg->dev, " dma_desc_enable=%d\n",
                hw->dma_desc_enable);
        dev_dbg(hsotg->dev, " power_optimized=%d\n",
                hw->power_optimized);
        dev_dbg(hsotg->dev, " i2c_enable=%d\n",
                hw->i2c_enable);
        dev_dbg(hsotg->dev, " hs_phy_type=%d\n",
                hw->hs_phy_type);
        dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
                hw->fs_phy_type);
        dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n",
                hw->utmi_phy_data_width);
        dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
                hw->num_dev_ep);
        dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n",
                hw->num_dev_perio_in_ep);
        dev_dbg(hsotg->dev, " host_channels=%d\n",
                hw->host_channels);
        dev_dbg(hsotg->dev, " max_transfer_size=%d\n",
                hw->max_transfer_size);
        dev_dbg(hsotg->dev, " max_packet_count=%d\n",
                hw->max_packet_count);
        dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n",
                hw->nperio_tx_q_depth);
        dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n",
                hw->host_perio_tx_q_depth);
        dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n",
                hw->dev_token_q_depth);
        dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n",
                hw->enable_dynamic_fifo);
        dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n",
                hw->en_multiple_tx_fifo);
        dev_dbg(hsotg->dev, " total_fifo_size=%d\n",
                hw->total_fifo_size);
        dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n",
                hw->host_rx_fifo_size);
        dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n",
                hw->host_nperio_tx_fifo_size);
        dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n",
                hw->host_perio_tx_fifo_size);
        dev_dbg(hsotg->dev, "\n");

        return 0;
}
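
/*
 * Illustrative sketch only (assumed probe-time ordering, not code from this
 * file): dwc2_get_hwparams() is expected to run before the parameters are
 * applied, since some dwc2_set_param_*() helpers validate requests against
 * hsotg->hw_params (e.g. reload_ctl checks hw_params.snpsid). The function
 * name below is hypothetical.
 */
#if 0
static int dwc2_example_init_params(struct dwc2_hsotg *hsotg)
{
        struct dwc2_core_params defaults;
        int retval;

        retval = dwc2_get_hwparams(hsotg);
        if (retval)
                return retval;  /* not a DWC_otg core, or controller dead */

        dwc2_set_all_params(&defaults, -1);
        dwc2_set_parameters(hsotg, &defaults);

        return 0;
}
#endif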

/*
 * Sets all parameters to the given value.
 *
 * Assumes that the dwc2_core_params struct contains only integers.
 */
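/*
 * Note (added for clarity): callers conventionally pass -1 here so that every
 * field starts out as "not set"; the dwc2_set_param_*() helpers above then
 * fall back to a default value for any field left at -1.
 */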
void dwc2_set_all_params(struct dwc2_core_params *params, int value)
{
        int *p = (int *)params;
        size_t size = sizeof(*params) / sizeof(*p);
        int i;

        for (i = 0; i < size; i++)
                p[i] = value;
}

u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
{
        return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
}

bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
{
        if (readl(hsotg->regs + GSNPSID) == 0xffffffff)
                return false;
        else
                return true;
}

/**
 * dwc2_enable_global_interrupts() - Enables the controller's Global
 * Interrupt in the AHB Config register
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
{
        u32 ahbcfg = readl(hsotg->regs + GAHBCFG);

        ahbcfg |= GAHBCFG_GLBL_INTR_EN;
        writel(ahbcfg, hsotg->regs + GAHBCFG);
}

/**
 * dwc2_disable_global_interrupts() - Disables the controller's Global
 * Interrupt in the AHB Config register
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
{
        u32 ahbcfg = readl(hsotg->regs + GAHBCFG);

        ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
        writel(ahbcfg, hsotg->regs + GAHBCFG);
}
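
/*
 * Illustrative sketch only (assumed usage, not code from this file): the
 * global interrupt enable bit is normally kept cleared while the core is
 * being configured and only set once an interrupt handler is installed.
 * The function name below is hypothetical.
 */
#if 0
static void dwc2_example_irq_bringup(struct dwc2_hsotg *hsotg)
{
        dwc2_disable_global_interrupts(hsotg);
        /* ... core setup, request_irq(), HCD/gadget registration ... */
        dwc2_enable_global_interrupts(hsotg);
}
#endif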

MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
MODULE_AUTHOR("Synopsys, Inc.");
MODULE_LICENSE("Dual BSD/GPL");