intel_runtime_pm.c

/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"
/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains, there's a sizeable amount of indirection required. This file
 * provides generic functions to the driver for grabbing and releasing
 * references for abstract power domains. It then maps those to the actual
 * power wells present for a given platform.
 */
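
/*
 * A minimal usage sketch: callers bracket hardware access with a reference
 * on the abstract power domain, and this file maps that to the underlying
 * power well(s):
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... access pipe A registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 *
 * The first get on a domain powers up every power well backing it; the
 * last put allows the wells to be powered down again.
 */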

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id);

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}
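
/*
 * Sketch of the refcounting contract above, assuming a single well "well":
 * only the first get and the last put touch the hardware.
 *
 *	intel_power_well_get(dev_priv, well);	- count 0 -> 1, HW enabled
 *	intel_power_well_get(dev_priv, well);	- count 1 -> 2, no HW access
 *	intel_power_well_put(dev_priv, well);	- count 2 -> 1, no HW access
 *	intel_power_well_put(dev_priv, well);	- count 1 -> 0, HW disabled
 */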

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
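
/*
 * Usage sketch, assuming a pipe A readout path: state readout code checks
 * the domain instead of taking a reference, with the relevant modeset
 * locks held by the caller:
 *
 *	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		... read out pipe A hardware state ...
 */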

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled, runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}
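
/*
 * Sketch of the intended call pattern; the actual call sites live in the
 * driver load/unload and suspend/resume paths:
 *
 *	intel_display_set_init_power(dev_priv, true);
 *	... load / resume hardware setup ...
 *	intel_display_set_init_power(dev_priv, false);
 *
 * While init power is enabled, all power domains are held on and runtime
 * pm is effectively disabled.
 */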

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
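/*
 * For reference, the four request registers used in this file are
 * HSW_PWR_WELL_BIOS, HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_KVMR and
 * HSW_PWR_WELL_DEBUG; the driver makes its own requests through
 * HSW_PWR_WELL_DRIVER.
 */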
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->id == SKL_DISP_PW_2) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}
}

static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	if (power_well->id == SKL_DISP_PW_2)
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (intel_wait_for_register(dev_priv,
						    HSW_PWR_WELL_DRIVER,
						    HSW_PWR_WELL_STATE_ENABLED,
						    HSW_PWR_WELL_STATE_ENABLED,
						    20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}
	} else {
		if (enable_requested) {
			hsw_power_well_pre_disable(dev_priv);

			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_GMBUS) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/*
	 * It has been observed that disabling the DC6 state sometimes
	 * doesn't stick and the DMC keeps returning the old value. Re-read
	 * the register enough times to make sure the write really stuck,
	 * and force a rewrite until we are confident the state is exactly
	 * what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}
	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the time we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (IS_GEN9_LP(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}

static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							       SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}

void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->id;
	u32 val;
	u32 mask;

	mask = SKL_POWER_WELL_REQ(power_well_id);

	val = I915_READ(HSW_PWR_WELL_KVMR);
	if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
		      power_well->name))
		I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);

	val = I915_READ(HSW_PWR_WELL_BIOS);
	val |= I915_READ(HSW_PWR_WELL_DEBUG);

	if (!(val & mask))
		return;

	/*
	 * DMC is known to force on the request bits for power well 1 on SKL
	 * and BXT and the misc IO power well on SKL but we don't expect any
	 * other request bits to be set, so WARN for those.
	 */
	if (power_well_id == SKL_DISP_PW_1 ||
	    (IS_GEN9_BC(dev_priv) &&
	     power_well_id == SKL_DISP_PW_MISC_IO))
		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
				 "by DMC\n", power_well->name);
	else
		WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
			  power_well->name);

	I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
	I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}

static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->id) {
	case SKL_DISP_PW_1:
		if (intel_wait_for_register(dev_priv,
					    SKL_FUSE_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_MISC_IO:
	case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A */
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case GLK_DISP_PW_AUX_A:
	case GLK_DISP_PW_AUX_B:
	case GLK_DISP_PW_AUX_C:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->id);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->id);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->id);
	is_enabled = tmp & state_mask;

	if (!enable && enable_requested)
		skl_power_well_pre_disable(dev_priv, power_well);

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
				!I915_READ(HSW_PWR_WELL_BIOS),
				"Invalid for power well status to be enabled, unless done by the BIOS, \
				when request is to disable!\n");
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}

		if (IS_GEN9(dev_priv))
			gen9_sanitize_power_well_requests(dev_priv, power_well);
	}

	if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
		     1))
		DRM_ERROR("%s %s timeout\n",
			  power_well->name, enable ? "enable" : "disable");

	if (check_fuse_status) {
		if (power_well->id == SKL_DISP_PW_1) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->id == SKL_DISP_PW_2) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	/* Take over the request bit if set by BIOS. */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) {
		if (!(I915_READ(HSW_PWR_WELL_DRIVER) &
		      HSW_PWR_WELL_ENABLE_REQUEST))
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
	}
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
			SKL_POWER_WELL_STATE(power_well->id);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->id);
	uint32_t bios_req = I915_READ(HSW_PWR_WELL_BIOS);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		uint32_t drv_req = I915_READ(HSW_PWR_WELL_DRIVER);

		if (!(drv_req & mask))
			I915_WRITE(HSW_PWR_WELL_DRIVER, drv_req | mask);
		I915_WRITE(HSW_PWR_WELL_BIOS, bios_req & ~mask);
	}
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->data);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->data);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->data);

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->data);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv, power_well->data);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u32 tmp = I915_READ(DBUF_CTL);

	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
	     "Unexpected DBuf power state (0x%08x)\n", tmp);
}
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	WARN_ON(!intel_cdclk_state_compare(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->id;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->id;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *	 be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
  1041. #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
  1042. static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
  1043. int power_well_id)
  1044. {
  1045. struct i915_power_domains *power_domains = &dev_priv->power_domains;
  1046. int i;
  1047. for (i = 0; i < power_domains->power_well_count; i++) {
  1048. struct i915_power_well *power_well;
  1049. power_well = &power_domains->power_wells[i];
  1050. if (power_well->id == power_well_id)
  1051. return power_well;
  1052. }
  1053. return NULL;
  1054. }
  1055. #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
        struct i915_power_well *cmn_bc =
                lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
        struct i915_power_well *cmn_d =
                lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
        u32 phy_control = dev_priv->chv_phy_control;
        u32 phy_status = 0;
        u32 phy_status_mask = 0xffffffff;

        /*
         * The BIOS can leave the PHY in some weird state
         * where it doesn't fully power down some parts.
         * Disable the asserts until the PHY has been fully
         * reset (ie. the power well has been disabled at
         * least once).
         */
        if (!dev_priv->chv_phy_assert[DPIO_PHY0])
                phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
                                     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

        if (!dev_priv->chv_phy_assert[DPIO_PHY1])
                phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

        if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
                phy_status |= PHY_POWERGOOD(DPIO_PHY0);

                /* this assumes override is only used to enable lanes */
                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

                /* CL1 is on whenever anything is on in either channel */
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

                /*
                 * The DPLLB check accounts for the pipe B + port B usage
                 * with CL2 powered up but all the lanes in the second channel
                 * powered down.
                 */
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
                    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
        }

        if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
                phy_status |= PHY_POWERGOOD(DPIO_PHY1);

                /* this assumes override is only used to enable lanes */
                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
        }

        phy_status &= phy_status_mask;

        /*
         * The PHY may be busy with some initial calibration and whatnot,
         * so the power state can take a while to actually change.
         */
        if (intel_wait_for_register(dev_priv,
                                    DISPLAY_PHY_STATUS,
                                    phy_status_mask,
                                    phy_status,
                                    10))
                DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
                          I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
                          phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET
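
/*
 * Common lane power well enable sequence for CHV: power the well up via
 * the Punit, wait for the PHY power good indication, enable dynamic power
 * down through the sideband registers, and finally deassert the common
 * lane reset in DISPLAY_PHY_CONTROL.
 */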
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        enum dpio_phy phy;
        enum pipe pipe;
        uint32_t tmp;

        WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
                     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

        if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
                pipe = PIPE_A;
                phy = DPIO_PHY0;
        } else {
                pipe = PIPE_C;
                phy = DPIO_PHY1;
        }

        /* since ref/cri clock was enabled */
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
        vlv_set_power_well(dev_priv, power_well, true);

        /* Poll for phypwrgood signal */
        if (intel_wait_for_register(dev_priv,
                                    DISPLAY_PHY_STATUS,
                                    PHY_POWERGOOD(phy),
                                    PHY_POWERGOOD(phy),
                                    1))
                DRM_ERROR("Display PHY %d is not powered up\n", phy);

        mutex_lock(&dev_priv->sb_lock);

        /* Enable dynamic power down */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
        tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
               DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

        if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
                tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
                tmp |= DPIO_DYNPWRDOWNEN_CH1;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
        } else {
                /*
                 * Force the non-existing CL2 off. BXT does this
                 * too, so maybe it saves some power even though
                 * CL2 doesn't exist?
                 */
                tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
                tmp |= DPIO_CL2_LDOFUSE_PWRENB;
                vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
        }

        mutex_unlock(&dev_priv->sb_lock);

        dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

        DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
                      phy, dev_priv->chv_phy_control);

        assert_chv_phy_status(dev_priv);
}
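
/*
 * Disable sequence: verify that no DPLL is still using the PHY, assert
 * the common lane reset via DISPLAY_PHY_CONTROL, then power the well
 * down through the Punit.
 */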
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        enum dpio_phy phy;

        WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
                     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

        if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
                phy = DPIO_PHY0;
                assert_pll_disabled(dev_priv, PIPE_A);
                assert_pll_disabled(dev_priv, PIPE_B);
        } else {
                phy = DPIO_PHY1;
                assert_pll_disabled(dev_priv, PIPE_C);
        }

        dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

        vlv_set_power_well(dev_priv, power_well, false);

        DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
                      phy, dev_priv->chv_phy_control);

        /* PHY is fully reset now, so we can enable the PHY state asserts */
        dev_priv->chv_phy_assert[phy] = true;

        assert_chv_phy_status(dev_priv);
}
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
                                     enum dpio_channel ch, bool override, unsigned int mask)
{
        enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
        u32 reg, val, expected, actual;

        /*
         * The BIOS can leave the PHY in some weird state
         * where it doesn't fully power down some parts.
         * Disable the asserts until the PHY has been fully
         * reset (ie. the power well has been disabled at
         * least once).
         */
        if (!dev_priv->chv_phy_assert[phy])
                return;

        if (ch == DPIO_CH0)
                reg = _CHV_CMN_DW0_CH0;
        else
                reg = _CHV_CMN_DW6_CH1;

        mutex_lock(&dev_priv->sb_lock);
        val = vlv_dpio_read(dev_priv, pipe, reg);
        mutex_unlock(&dev_priv->sb_lock);

        /*
         * This assumes !override is only used when the port is disabled.
         * All lanes should power down even without the override when
         * the port is disabled.
         */
        if (!override || mask == 0xf) {
                expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

                /*
                 * If CH1 common lane is not active anymore
                 * (eg. for pipe B DPLL) the entire channel will
                 * shut down, which causes the common lane registers
                 * to read as 0. That means we can't actually check
                 * the lane power down status bits, but as the entire
                 * register reads as 0 it's a good indication that the
                 * channel is indeed entirely powered down.
                 */
                if (ch == DPIO_CH1 && val == 0)
                        expected = 0;
        } else if (mask != 0x0) {
                expected = DPIO_ANYDL_POWERDOWN;
        } else {
                expected = 0;
        }

        if (ch == DPIO_CH0)
                actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
        else
                actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
        actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

        WARN(actual != expected,
             "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
             !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
             !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
             reg, val);
}
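
/*
 * Flip the per-channel power down override enable bit for the given PHY
 * channel and return the previous override state, so callers can restore
 * it afterwards.
 */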
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
                          enum dpio_channel ch, bool override)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        bool was_override;

        mutex_lock(&power_domains->lock);

        was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

        if (override == was_override)
                goto out;

        if (override)
                dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
        else
                dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

        DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
                      phy, ch, dev_priv->chv_phy_control);

        assert_chv_phy_status(dev_priv);

out:
        mutex_unlock(&power_domains->lock);

        return was_override;
}
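
/*
 * Update the lane power down override mask for the encoder's PHY channel:
 * @mask selects which lanes stay powered, @override enables the override.
 */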
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
                             bool override, unsigned int mask)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
        enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

        mutex_lock(&power_domains->lock);

        dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
        dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

        if (override)
                dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
        else
                dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

        DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
                      phy, ch, mask, dev_priv->chv_phy_control);

        assert_chv_phy_status(dev_priv);

        assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

        mutex_unlock(&power_domains->lock);
}
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well)
{
        enum pipe pipe = power_well->id;
        bool enabled;
        u32 state, ctrl;

        mutex_lock(&dev_priv->rps.hw_lock);

        state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
        /*
         * We only ever set the power-on and power-gate states, anything
         * else is unexpected.
         */
        WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
        enabled = state == DP_SSS_PWR_ON(pipe);

        /*
         * A transient state at this point would mean some unexpected party
         * is poking at the power controls too.
         */
        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
        WARN_ON(ctrl << 16 != state);

        mutex_unlock(&dev_priv->rps.hw_lock);

        return enabled;
}
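
/*
 * The pipe A power well is controlled through the Punit DSPFREQ register:
 * write the requested state to the SSC control bits, then poll the SSS
 * status bits until they reflect it.
 */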
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
                                    struct i915_power_well *power_well,
                                    bool enable)
{
        enum pipe pipe = power_well->id;
        u32 state;
        u32 ctrl;

        state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

        mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
        ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

        if (COND)
                goto out;

        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
        ctrl &= ~DP_SSC_MASK(pipe);
        ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
        vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

        if (wait_for(COND, 100))
                DRM_ERROR("timeout setting power well state %08x (%08x)\n",
                          state,
                          vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
        mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
                                       struct i915_power_well *power_well)
{
        WARN_ON_ONCE(power_well->id != PIPE_A);

        chv_set_pipe_power_well(dev_priv, power_well, true);

        vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well)
{
        WARN_ON_ONCE(power_well->id != PIPE_A);

        vlv_display_power_well_deinit(dev_priv);

        chv_set_pipe_power_well(dev_priv, power_well, false);
}
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
                                 enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *power_well;

        for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
                intel_power_well_get(dev_priv, power_well);

        power_domains->domain_use_count[domain]++;
}
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;

        intel_runtime_pm_get(dev_priv);

        mutex_lock(&power_domains->lock);

        __intel_display_power_get_domain(dev_priv, domain);

        mutex_unlock(&power_domains->lock);
}
/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain if the domain is
 * already enabled, and ensures that it stays powered up. If the domain is not
 * enabled, no reference is taken.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns: true if the reference was taken, false otherwise.
 */
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
                                        enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        bool is_enabled;

        if (!intel_runtime_pm_get_if_in_use(dev_priv))
                return false;

        mutex_lock(&power_domains->lock);

        if (__intel_display_power_is_enabled(dev_priv, domain)) {
                __intel_display_power_get_domain(dev_priv, domain);
                is_enabled = true;
        } else {
                is_enabled = false;
        }

        mutex_unlock(&power_domains->lock);

        if (!is_enabled)
                intel_runtime_pm_put(dev_priv);

        return is_enabled;
}
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains;
        struct i915_power_well *power_well;

        power_domains = &dev_priv->power_domains;

        mutex_lock(&power_domains->lock);

        WARN(!power_domains->domain_use_count[domain],
             "Use count on domain %s is already zero\n",
             intel_display_power_domain_str(domain));
        power_domains->domain_use_count[domain]--;

        for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
                intel_power_well_put(dev_priv, power_well);

        mutex_unlock(&power_domains->lock);

        intel_runtime_pm_put(dev_priv);
}
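
/*
 * Per-platform power domain masks: each power well below is tagged with
 * the set of display power domains it serves.
 */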
#define HSW_DISPLAY_POWER_DOMAINS ( \
        BIT_ULL(POWER_DOMAIN_PIPE_B) | \
        BIT_ULL(POWER_DOMAIN_PIPE_C) | \
        BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
        BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
        BIT_ULL(POWER_DOMAIN_VGA) | \
        BIT_ULL(POWER_DOMAIN_AUDIO) | \
        BIT_ULL(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS ( \
        BIT_ULL(POWER_DOMAIN_PIPE_B) | \
        BIT_ULL(POWER_DOMAIN_PIPE_C) | \
        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
        BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
        BIT_ULL(POWER_DOMAIN_VGA) | \
        BIT_ULL(POWER_DOMAIN_AUDIO) | \
        BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS ( \
        BIT_ULL(POWER_DOMAIN_PIPE_A) | \
        BIT_ULL(POWER_DOMAIN_PIPE_B) | \
        BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
        BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
        BIT_ULL(POWER_DOMAIN_VGA) | \
        BIT_ULL(POWER_DOMAIN_AUDIO) | \
        BIT_ULL(POWER_DOMAIN_AUX_B) | \
        BIT_ULL(POWER_DOMAIN_AUX_C) | \
        BIT_ULL(POWER_DOMAIN_GMBUS) | \
        BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
        BIT_ULL(POWER_DOMAIN_AUX_B) | \
        BIT_ULL(POWER_DOMAIN_AUX_C) | \
        BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
        BIT_ULL(POWER_DOMAIN_AUX_B) | \
        BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
        BIT_ULL(POWER_DOMAIN_AUX_B) | \
        BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
        BIT_ULL(POWER_DOMAIN_AUX_C) | \
        BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
        BIT_ULL(POWER_DOMAIN_AUX_C) | \
        BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DISPLAY_POWER_DOMAINS ( \
        BIT_ULL(POWER_DOMAIN_PIPE_A) | \
        BIT_ULL(POWER_DOMAIN_PIPE_B) | \
        BIT_ULL(POWER_DOMAIN_PIPE_C) | \
        BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
        BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
        BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
        BIT_ULL(POWER_DOMAIN_VGA) | \
        BIT_ULL(POWER_DOMAIN_AUDIO) | \
        BIT_ULL(POWER_DOMAIN_AUX_B) | \
        BIT_ULL(POWER_DOMAIN_AUX_C) | \
        BIT_ULL(POWER_DOMAIN_AUX_D) | \
        BIT_ULL(POWER_DOMAIN_GMBUS) | \
        BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
        BIT_ULL(POWER_DOMAIN_AUX_B) | \
        BIT_ULL(POWER_DOMAIN_AUX_C) | \
        BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
        BIT_ULL(POWER_DOMAIN_AUX_D) | \
        BIT_ULL(POWER_DOMAIN_INIT))
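
/*
 * Power well ops vtables: each platform supplies sync_hw/enable/disable/
 * is_enabled callbacks matching its power well control mechanism.
 */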
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = i9xx_always_on_power_well_noop,
        .disable = i9xx_always_on_power_well_noop,
        .is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = chv_pipe_power_well_enable,
        .disable = chv_pipe_power_well_disable,
        .is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = chv_dpio_cmn_power_well_enable,
        .disable = chv_dpio_cmn_power_well_disable,
        .is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
};

static const struct i915_power_well_ops hsw_power_well_ops = {
        .sync_hw = hsw_power_well_sync_hw,
        .enable = hsw_power_well_enable,
        .disable = hsw_power_well_disable,
        .is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
        .sync_hw = skl_power_well_sync_hw,
        .enable = skl_power_well_enable,
        .disable = skl_power_well_disable,
        .is_enabled = skl_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = gen9_dc_off_power_well_enable,
        .disable = gen9_dc_off_power_well_disable,
        .is_enabled = gen9_dc_off_power_well_enabled,
};

static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = bxt_dpio_cmn_power_well_enable,
        .disable = bxt_dpio_cmn_power_well_disable,
        .is_enabled = bxt_dpio_cmn_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "display",
                .domains = HSW_DISPLAY_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
        },
};

static struct i915_power_well bdw_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "display",
                .domains = BDW_DISPLAY_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
        },
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = vlv_display_power_well_enable,
        .disable = vlv_display_power_well_disable,
        .is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = vlv_dpio_cmn_power_well_enable,
        .disable = vlv_dpio_cmn_power_well_disable,
        .is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = vlv_power_well_enable,
        .disable = vlv_power_well_disable,
        .is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = PUNIT_POWER_WELL_ALWAYS_ON,
        },
        {
                .name = "display",
                .domains = VLV_DISPLAY_POWER_DOMAINS,
                .id = PUNIT_POWER_WELL_DISP2D,
                .ops = &vlv_display_power_well_ops,
        },
        {
                .name = "dpio-tx-b-01",
                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
                .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
        },
        {
                .name = "dpio-tx-b-23",
                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
                .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
        },
        {
                .name = "dpio-tx-c-01",
                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
                .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
        },
        {
                .name = "dpio-tx-c-23",
                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
                .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
        },
        {
                .name = "dpio-common",
                .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
                .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
                .ops = &vlv_dpio_cmn_power_well_ops,
        },
};

static struct i915_power_well chv_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "display",
                /*
                 * Pipe A power well is the new disp2d well. Pipe B and C
                 * power wells don't actually exist. Pipe A power well is
                 * required for any pipe to work.
                 */
                .domains = CHV_DISPLAY_POWER_DOMAINS,
                .id = PIPE_A,
                .ops = &chv_pipe_power_well_ops,
        },
        {
                .name = "dpio-common-bc",
                .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
                .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
                .ops = &chv_dpio_cmn_power_well_ops,
        },
        {
                .name = "dpio-common-d",
                .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
                .id = PUNIT_POWER_WELL_DPIO_CMN_D,
                .ops = &chv_dpio_cmn_power_well_ops,
        },
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
                                         int power_well_id)
{
        struct i915_power_well *power_well;
        bool ret;

        power_well = lookup_power_well(dev_priv, power_well_id);
        ret = power_well->ops->is_enabled(dev_priv, power_well);

        return ret;
}
static struct i915_power_well skl_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
                .id = SKL_DISP_PW_ALWAYS_ON,
        },
        {
                .name = "power well 1",
                /* Handled by the DMC firmware */
                .domains = 0,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_1,
        },
        {
                .name = "MISC IO power well",
                /* Handled by the DMC firmware */
                .domains = 0,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_MISC_IO,
        },
        {
                .name = "DC off",
                .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
                .ops = &gen9_dc_off_power_well_ops,
                .id = SKL_DISP_PW_DC_OFF,
        },
        {
                .name = "power well 2",
                .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_2,
        },
        {
                .name = "DDI A/E IO power well",
                .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_DDI_A_E,
        },
        {
                .name = "DDI B IO power well",
                .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_DDI_B,
        },
        {
                .name = "DDI C IO power well",
                .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_DDI_C,
        },
        {
                .name = "DDI D IO power well",
                .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_DDI_D,
        },
};
static struct i915_power_well bxt_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "power well 1",
                /* Handled by the DMC firmware */
                .domains = 0,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_1,
        },
        {
                .name = "DC off",
                .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
                .ops = &gen9_dc_off_power_well_ops,
                .id = SKL_DISP_PW_DC_OFF,
        },
        {
                .name = "power well 2",
                .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_2,
        },
        {
                .name = "dpio-common-a",
                .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
                .id = BXT_DPIO_CMN_A,
                .data = DPIO_PHY1,
        },
        {
                .name = "dpio-common-bc",
                .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
                .id = BXT_DPIO_CMN_BC,
                .data = DPIO_PHY0,
        },
};
static struct i915_power_well glk_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "power well 1",
                /* Handled by the DMC firmware */
                .domains = 0,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_1,
        },
        {
                .name = "DC off",
                .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
                .ops = &gen9_dc_off_power_well_ops,
                .id = SKL_DISP_PW_DC_OFF,
        },
        {
                .name = "power well 2",
                .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_2,
        },
        {
                .name = "dpio-common-a",
                .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
                .id = BXT_DPIO_CMN_A,
                .data = DPIO_PHY1,
        },
        {
                .name = "dpio-common-b",
                .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
                .id = BXT_DPIO_CMN_BC,
                .data = DPIO_PHY0,
        },
        {
                .name = "dpio-common-c",
                .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
                .ops = &bxt_dpio_cmn_power_well_ops,
                .id = GLK_DPIO_CMN_C,
                .data = DPIO_PHY2,
        },
        {
                .name = "AUX A",
                .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = GLK_DISP_PW_AUX_A,
        },
        {
                .name = "AUX B",
                .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = GLK_DISP_PW_AUX_B,
        },
        {
                .name = "AUX C",
                .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = GLK_DISP_PW_AUX_C,
        },
        {
                .name = "DDI A IO power well",
                .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = GLK_DISP_PW_DDI_A,
        },
        {
                .name = "DDI B IO power well",
                .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_DDI_B,
        },
        {
                .name = "DDI C IO power well",
                .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
                .ops = &skl_power_well_ops,
                .id = SKL_DISP_PW_DDI_C,
        },
};
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
                                   int disable_power_well)
{
        if (disable_power_well >= 0)
                return !!disable_power_well;

        return 1;
}
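
/*
 * Compute the mask of DC states the driver is allowed to enable, based on
 * the platform's maximum supported DC state and the i915.enable_dc module
 * parameter.
 */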
static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
                                    int enable_dc)
{
        uint32_t mask;
        int requested_dc;
        int max_dc;

        if (IS_GEN9_BC(dev_priv)) {
                max_dc = 2;
                mask = 0;
        } else if (IS_GEN9_LP(dev_priv)) {
                max_dc = 1;
                /*
                 * DC9 has a separate HW flow from the rest of the DC states,
                 * not depending on the DMC firmware. It's needed by system
                 * suspend/resume, so allow it unconditionally.
                 */
                mask = DC_STATE_EN_DC9;
        } else {
                max_dc = 0;
                mask = 0;
        }

        if (!i915.disable_power_well)
                max_dc = 0;

        if (enable_dc >= 0 && enable_dc <= max_dc) {
                requested_dc = enable_dc;
        } else if (enable_dc == -1) {
                requested_dc = max_dc;
        } else if (enable_dc > max_dc && enable_dc <= 2) {
                DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
                              enable_dc, max_dc);
                requested_dc = max_dc;
        } else {
                DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
                requested_dc = max_dc;
        }

        if (requested_dc > 1)
                mask |= DC_STATE_EN_UPTO_DC6;
        if (requested_dc > 0)
                mask |= DC_STATE_EN_UPTO_DC5;

        DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);

        return mask;
}
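
/*
 * Install a platform's power well table in the power domains struct,
 * recording both the table pointer and its size.
 */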
#define set_power_wells(power_domains, __power_wells) ({ \
        (power_domains)->power_wells = (__power_wells); \
        (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
})
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;

        i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
                                                     i915.disable_power_well);
        dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
                                                            i915.enable_dc);

        BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);

        mutex_init(&power_domains->lock);

        /*
         * The enabling order will be from lower to higher indexed wells,
         * the disabling order is reversed.
         */
        if (IS_HASWELL(dev_priv)) {
                set_power_wells(power_domains, hsw_power_wells);
        } else if (IS_BROADWELL(dev_priv)) {
                set_power_wells(power_domains, bdw_power_wells);
        } else if (IS_GEN9_BC(dev_priv)) {
                set_power_wells(power_domains, skl_power_wells);
        } else if (IS_BROXTON(dev_priv)) {
                set_power_wells(power_domains, bxt_power_wells);
        } else if (IS_GEMINILAKE(dev_priv)) {
                set_power_wells(power_domains, glk_power_wells);
        } else if (IS_CHERRYVIEW(dev_priv)) {
                set_power_wells(power_domains, chv_power_wells);
        } else if (IS_VALLEYVIEW(dev_priv)) {
                set_power_wells(power_domains, vlv_power_wells);
        } else {
                set_power_wells(power_domains, i9xx_always_on_power_well);
        }

        return 0;
}
/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
        struct device *kdev = &dev_priv->drm.pdev->dev;

        /*
         * The i915.ko module is still not prepared to be loaded when
         * the power well is not enabled, so just enable it in case
         * we're going to unload/reload.
         * The following also reacquires the RPM reference the core passed
         * to the driver during loading, which is dropped in
         * intel_runtime_pm_enable(). We have to hand back the control of the
         * device to the core with this reference held.
         */
        intel_display_set_init_power(dev_priv, true);

        /* Remove the refcount we took because power well support was disabled. */
        if (!i915.disable_power_well)
                intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

        /*
         * Remove the refcount we took in intel_runtime_pm_enable() in case
         * the platform doesn't support runtime PM.
         */
        if (!HAS_RUNTIME_PM(dev_priv))
                pm_runtime_put(kdev);
}
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *power_well;

        mutex_lock(&power_domains->lock);
        for_each_power_well(dev_priv, power_well) {
                power_well->ops->sync_hw(dev_priv, power_well);
                power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
                                                                     power_well);
        }
        mutex_unlock(&power_domains->lock);
}
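
/*
 * DBuf power is requested through DBUF_CTL: set or clear the request bit,
 * wait ~10us, then check that DBUF_POWER_STATE reflects the new state.
 */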
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
        I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
        POSTING_READ(DBUF_CTL);

        udelay(10);

        if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
                DRM_ERROR("DBuf power enable timeout\n");
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
        I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
        POSTING_READ(DBUF_CTL);

        udelay(10);

        if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
                DRM_ERROR("DBuf power disable timeout!\n");
}
static void skl_display_core_init(struct drm_i915_private *dev_priv,
                                  bool resume)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *well;
        uint32_t val;

        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

        /* enable PCH reset handshake */
        val = I915_READ(HSW_NDE_RSTWRN_OPT);
        I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

        /* enable PG1 and Misc I/O */
        mutex_lock(&power_domains->lock);

        well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
        intel_power_well_enable(dev_priv, well);

        well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
        intel_power_well_enable(dev_priv, well);

        mutex_unlock(&power_domains->lock);

        skl_init_cdclk(dev_priv);

        gen9_dbuf_enable(dev_priv);

        if (resume && dev_priv->csr.dmc_payload)
                intel_csr_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *well;

        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

        gen9_dbuf_disable(dev_priv);

        skl_uninit_cdclk(dev_priv);

        /* The spec doesn't call for removing the reset handshake flag */
        /* disable PG1 and Misc I/O */

        mutex_lock(&power_domains->lock);

        well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
        intel_power_well_disable(dev_priv, well);

        well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
        intel_power_well_disable(dev_priv, well);

        mutex_unlock(&power_domains->lock);
}
void bxt_display_core_init(struct drm_i915_private *dev_priv,
                           bool resume)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *well;
        uint32_t val;

        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

        /*
         * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
         * or else the reset will hang because there is no PCH to respond.
         * Move the handshake programming to initialization sequence.
         * Previously was left up to BIOS.
         */
        val = I915_READ(HSW_NDE_RSTWRN_OPT);
        val &= ~RESET_PCH_HANDSHAKE_ENABLE;
        I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

        /* Enable PG1 */
        mutex_lock(&power_domains->lock);

        well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
        intel_power_well_enable(dev_priv, well);

        mutex_unlock(&power_domains->lock);

        bxt_init_cdclk(dev_priv);

        gen9_dbuf_enable(dev_priv);

        if (resume && dev_priv->csr.dmc_payload)
                intel_csr_load_program(dev_priv);
}

void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *well;

        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

        gen9_dbuf_disable(dev_priv);

        bxt_uninit_cdclk(dev_priv);

        /* The spec doesn't call for removing the reset handshake flag */

        /* Disable PG1 */
        mutex_lock(&power_domains->lock);

        well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
        intel_power_well_disable(dev_priv, well);

        mutex_unlock(&power_domains->lock);
}
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
        struct i915_power_well *cmn_bc =
                lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
        struct i915_power_well *cmn_d =
                lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

        /*
         * DISPLAY_PHY_CONTROL can get corrupted if read. As a
         * workaround never ever read DISPLAY_PHY_CONTROL, and
         * instead maintain a shadow copy ourselves. Use the actual
         * power well state and lane status to reconstruct the
         * expected initial value.
         */
        dev_priv->chv_phy_control =
                PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
                PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
                PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
                PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
                PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

        /*
         * If all lanes are disabled we leave the override disabled
         * with all power down bits cleared to match the state we
         * would use after disabling the port. Otherwise enable the
         * override and set the lane powerdown bits according to the
         * current lane status.
         */
        if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
                uint32_t status = I915_READ(DPLL(PIPE_A));
                unsigned int mask;

                mask = status & DPLL_PORTB_READY_MASK;
                if (mask == 0xf)
                        mask = 0x0;
                else
                        dev_priv->chv_phy_control |=
                                PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

                dev_priv->chv_phy_control |=
                        PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

                mask = (status & DPLL_PORTC_READY_MASK) >> 4;
                if (mask == 0xf)
                        mask = 0x0;
                else
                        dev_priv->chv_phy_control |=
                                PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

                dev_priv->chv_phy_control |=
                        PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

                dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

                dev_priv->chv_phy_assert[DPIO_PHY0] = false;
        } else {
                dev_priv->chv_phy_assert[DPIO_PHY0] = true;
        }

        if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
                uint32_t status = I915_READ(DPIO_PHY_STATUS);
                unsigned int mask;

                mask = status & DPLL_PORTD_READY_MASK;

                if (mask == 0xf)
                        mask = 0x0;
                else
                        dev_priv->chv_phy_control |=
                                PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

                dev_priv->chv_phy_control |=
                        PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

                dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

                dev_priv->chv_phy_assert[DPIO_PHY1] = false;
        } else {
                dev_priv->chv_phy_assert[DPIO_PHY1] = true;
        }

        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

        DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
                      dev_priv->chv_phy_control);
}
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
        struct i915_power_well *cmn =
                lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
        struct i915_power_well *disp2d =
                lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

        /* If the display might be already active skip this */
        if (cmn->ops->is_enabled(dev_priv, cmn) &&
            disp2d->ops->is_enabled(dev_priv, disp2d) &&
            I915_READ(DPIO_CTL) & DPIO_CMNRST)
                return;

        DRM_DEBUG_KMS("toggling display PHY side reset\n");

        /* cmnlane needs DPLL registers */
        disp2d->ops->enable(dev_priv, disp2d);

        /*
         * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
         * Need to assert and de-assert PHY SB reset by gating the
         * common lane power, then un-gating it.
         * Simply ungating isn't enough to reset the PHY enough to get
         * ports and lanes running.
         */
        cmn->ops->disable(dev_priv, cmn);
}
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled during the
 * modeset state HW readout. After that the reference count of each power well
 * must match its HW enabled state, see intel_power_domains_verify_state().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;

        power_domains->initializing = true;

        if (IS_GEN9_BC(dev_priv)) {
                skl_display_core_init(dev_priv, resume);
        } else if (IS_GEN9_LP(dev_priv)) {
                bxt_display_core_init(dev_priv, resume);
        } else if (IS_CHERRYVIEW(dev_priv)) {
                mutex_lock(&power_domains->lock);
                chv_phy_control_init(dev_priv);
                mutex_unlock(&power_domains->lock);
        } else if (IS_VALLEYVIEW(dev_priv)) {
                mutex_lock(&power_domains->lock);
                vlv_cmnlane_wa(dev_priv);
                mutex_unlock(&power_domains->lock);
        }

        /* For now, we need the power well to be always enabled. */
        intel_display_set_init_power(dev_priv, true);
        /*
         * If power well support was disabled by the user, take a permanent
         * INIT power domain reference to keep the power wells enabled.
         */
        if (!i915.disable_power_well)
                intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
        intel_power_domains_sync_hw(dev_priv);
        power_domains->initializing = false;
}
/**
 * intel_power_domains_suspend - suspend power domain state
 * @dev_priv: i915 device instance
 *
 * This function prepares the hardware power domain state before entering
 * system suspend. It must be paired with intel_power_domains_init_hw().
 */
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
        /*
         * Even if power well support was disabled we still want to disable
         * power wells while we are system suspended.
         */
        if (!i915.disable_power_well)
                intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

        if (IS_GEN9_BC(dev_priv))
                skl_display_core_uninit(dev_priv);
        else if (IS_GEN9_LP(dev_priv))
                bxt_display_core_uninit(dev_priv);
}
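
/*
 * Dump the refcount of each power well and the use count of every domain
 * it serves, for debugging refcount mismatches.
 */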
static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *power_well;

        for_each_power_well(dev_priv, power_well) {
                enum intel_display_power_domain domain;

                DRM_DEBUG_DRIVER("%-25s %d\n",
                                 power_well->name, power_well->count);

                for_each_power_domain(domain, power_well->domains)
                        DRM_DEBUG_DRIVER("  %-23s %d\n",
                                         intel_display_power_domain_str(domain),
                                         power_domains->domain_use_count[domain]);
        }
}
/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @dev_priv: i915 device instance
 *
 * Verify if the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *power_well;
        bool dump_domain_info;

        mutex_lock(&power_domains->lock);

        dump_domain_info = false;
        for_each_power_well(dev_priv, power_well) {
                enum intel_display_power_domain domain;
                int domains_count;
                bool enabled;

                /*
                 * Power wells not belonging to any domain (like the MISC_IO
                 * and PW1 power wells) are under FW control, so ignore them,
                 * since their state can change asynchronously.
                 */
                if (!power_well->domains)
                        continue;

                enabled = power_well->ops->is_enabled(dev_priv, power_well);
                if ((power_well->count || power_well->always_on) != enabled)
                        DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)\n",
                                  power_well->name, power_well->count, enabled);

                domains_count = 0;
                for_each_power_domain(domain, power_well->domains)
                        domains_count += power_domains->domain_use_count[domain];

                if (power_well->count != domains_count) {
                        DRM_ERROR("power well %s refcount/domain refcount mismatch "
                                  "(refcount %d/domains refcount %d)\n",
                                  power_well->name, power_well->count,
                                  domains_count);
                        dump_domain_info = true;
                }
        }

        if (dump_domain_info) {
                static bool dumped;

                if (!dumped) {
                        intel_power_domains_dump_info(dev_priv);
                        dumped = true;
                }
        }

        mutex_unlock(&power_domains->lock);
}
/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
        struct pci_dev *pdev = dev_priv->drm.pdev;
        struct device *kdev = &pdev->dev;
        int ret;

        ret = pm_runtime_get_sync(kdev);
        WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);

        atomic_inc(&dev_priv->pm.wakeref_count);
        assert_rpm_wakelock_held(dev_priv);
}
/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: true if an RPM reference was obtained, false otherwise.
 */
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
        struct pci_dev *pdev = dev_priv->drm.pdev;
        struct device *kdev = &pdev->dev;

        if (IS_ENABLED(CONFIG_PM)) {
                int ret = pm_runtime_get_if_in_use(kdev);

                /*
                 * In cases where runtime PM is disabled by the RPM core and
                 * we get an -EINVAL return value we are not supposed to call
                 * this function, since the power state is undefined. This
                 * applies atm to the late/early system suspend/resume
                 * handlers.
                 */
                WARN_ONCE(ret < 0,
                          "pm_runtime_get_if_in_use() failed: %d\n", ret);
                if (ret <= 0)
                        return false;
        }

        atomic_inc(&dev_priv->pm.wakeref_count);
        assert_rpm_wakelock_held(dev_priv);

        return true;
}
/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
        struct pci_dev *pdev = dev_priv->drm.pdev;
        struct device *kdev = &pdev->dev;

        assert_rpm_wakelock_held(dev_priv);
        pm_runtime_get_noresume(kdev);

        atomic_inc(&dev_priv->pm.wakeref_count);
}
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
        struct pci_dev *pdev = dev_priv->drm.pdev;
        struct device *kdev = &pdev->dev;

        assert_rpm_wakelock_held(dev_priv);
        atomic_dec(&dev_priv->pm.wakeref_count);

        pm_runtime_mark_last_busy(kdev);
        pm_runtime_put_autosuspend(kdev);
}
/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
        struct pci_dev *pdev = dev_priv->drm.pdev;
        struct device *kdev = &pdev->dev;

        pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
        pm_runtime_mark_last_busy(kdev);

        /*
         * Take a permanent reference to disable the RPM functionality and drop
         * it only when unloading the driver. Use the low level get/put helpers,
         * so the driver's own RPM reference tracking asserts also work on
         * platforms without RPM support.
         */
        if (!HAS_RUNTIME_PM(dev_priv)) {
                int ret;

                pm_runtime_dont_use_autosuspend(kdev);
                ret = pm_runtime_get_sync(kdev);
                WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
        } else {
                pm_runtime_use_autosuspend(kdev);
        }

        /*
         * The core calls the driver load handler with an RPM reference held.
         * We drop that here and will reacquire it during unloading in
         * intel_power_domains_fini().
         */
        pm_runtime_put_autosuspend(kdev);
}