intel_runtime_pm.c 102 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510
  1. /*
  2. * Copyright © 2012-2014 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Eugeni Dodonov <eugeni.dodonov@intel.com>
  25. * Daniel Vetter <daniel.vetter@ffwll.ch>
  26. *
  27. */
  28. #include <linux/pm_runtime.h>
  29. #include <linux/vgaarb.h>
  30. #include "i915_drv.h"
  31. #include "intel_drv.h"
  32. /**
  33. * DOC: runtime pm
  34. *
  35. * The i915 driver supports dynamic enabling and disabling of entire hardware
  36. * blocks at runtime. This is especially important on the display side where
  37. * software is supposed to control many power gates manually on recent hardware,
  38. * since on the GT side a lot of the power management is done by the hardware.
  39. * But even there some manual control at the device level is required.
  40. *
  41. * Since i915 supports a diverse set of platforms with a unified codebase and
  42. * hardware engineers just love to shuffle functionality around between power
  43. * domains there's a sizeable amount of indirection required. This file provides
  44. * generic functions to the driver for grabbing and releasing references for
  45. * abstract power domains. It then maps those to the actual power wells
  46. * present for a given platform.
  47. */
  48. bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
  49. enum i915_power_well_id power_well_id);
  50. static struct i915_power_well *
  51. lookup_power_well(struct drm_i915_private *dev_priv,
  52. enum i915_power_well_id power_well_id);
/**
 * intel_display_power_domain_str - map a power domain enum to its name
 * @domain: power domain to convert
 *
 * Returns a human readable string for @domain, used in debug/diagnostic
 * output. An unknown value triggers MISSING_CASE() and returns "?".
 */
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
/* Enable @power_well via its platform ops and update the sw tracking state. */
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	/* Mark enabled only after the hw enable sequence has run. */
	power_well->hw_enabled = true;
}
/* Disable @power_well via its platform ops and update the sw tracking state. */
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	/* Clear the flag before the hw disable, mirroring the enable path. */
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}
  158. static void intel_power_well_get(struct drm_i915_private *dev_priv,
  159. struct i915_power_well *power_well)
  160. {
  161. if (!power_well->count++)
  162. intel_power_well_enable(dev_priv, power_well);
  163. }
  164. static void intel_power_well_put(struct drm_i915_private *dev_priv,
  165. struct i915_power_well *power_well)
  166. {
  167. WARN(!power_well->count, "Use count on power well %s is already zero",
  168. power_well->name);
  169. if (!--power_well->count)
  170. intel_power_well_disable(dev_priv, power_well);
  171. }
  172. /**
  173. * __intel_display_power_is_enabled - unlocked check for a power domain
  174. * @dev_priv: i915 device instance
  175. * @domain: power domain to check
  176. *
  177. * This is the unlocked version of intel_display_power_is_enabled() and should
  178. * only be used from error capture and recovery code where deadlocks are
  179. * possible.
  180. *
  181. * Returns:
  182. * True when the power domain is enabled, false otherwise.
  183. */
  184. bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
  185. enum intel_display_power_domain domain)
  186. {
  187. struct i915_power_well *power_well;
  188. bool is_enabled;
  189. if (dev_priv->runtime_pm.suspended)
  190. return false;
  191. is_enabled = true;
  192. for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
  193. if (power_well->always_on)
  194. continue;
  195. if (!power_well->hw_enabled) {
  196. is_enabled = false;
  197. break;
  198. }
  199. }
  200. return is_enabled;
  201. }
  202. /**
  203. * intel_display_power_is_enabled - check for a power domain
  204. * @dev_priv: i915 device instance
  205. * @domain: power domain to check
  206. *
  207. * This function can be used to check the hw power domain state. It is mostly
  208. * used in hardware state readout functions. Everywhere else code should rely
  209. * upon explicit power domain reference counting to ensure that the hardware
  210. * block is powered up before accessing it.
  211. *
  212. * Callers must hold the relevant modesetting locks to ensure that concurrent
  213. * threads can't disable the power well while the caller tries to read a few
  214. * registers.
  215. *
  216. * Returns:
  217. * True when the power domain is enabled, false otherwise.
  218. */
  219. bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
  220. enum intel_display_power_domain domain)
  221. {
  222. struct i915_power_domains *power_domains;
  223. bool ret;
  224. power_domains = &dev_priv->power_domains;
  225. mutex_lock(&power_domains->lock);
  226. ret = __intel_display_power_is_enabled(dev_priv, domain);
  227. mutex_unlock(&power_domains->lock);
  228. return ret;
  229. }
  230. /**
  231. * intel_display_set_init_power - set the initial power domain state
  232. * @dev_priv: i915 device instance
  233. * @enable: whether to enable or disable the initial power domain state
  234. *
  235. * For simplicity our driver load/unload and system suspend/resume code assumes
  236. * that all power domains are always enabled. This functions controls the state
  237. * of this little hack. While the initial power domain state is enabled runtime
  238. * pm is effectively disabled.
  239. */
  240. void intel_display_set_init_power(struct drm_i915_private *dev_priv,
  241. bool enable)
  242. {
  243. if (dev_priv->power_domains.init_power_on == enable)
  244. return;
  245. if (enable)
  246. intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
  247. else
  248. intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
  249. dev_priv->power_domains.init_power_on = enable;
  250. }
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make sure here that we touch the VGA MSR
	 * register, making sure vgacon can keep working normally without
	 * triggering interrupts and error messages.
	 */
	if (has_vga) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);
	}

	/* Re-enable pipe interrupts gated behind this power well. */
	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
  279. static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
  280. u8 irq_pipe_mask)
  281. {
  282. if (irq_pipe_mask)
  283. gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
  284. }
/*
 * Poll the driver request register until the power well state bit reports
 * enabled, warning on timeout.
 */
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	WARN_ON(intel_wait_for_register(dev_priv,
					HSW_PWR_WELL_CTL_DRIVER(id),
					HSW_PWR_WELL_CTL_STATE(id),
					HSW_PWR_WELL_CTL_STATE(id),
					1));
}
  296. static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
  297. enum i915_power_well_id id)
  298. {
  299. u32 req_mask = HSW_PWR_WELL_CTL_REQ(id);
  300. u32 ret;
  301. ret = I915_READ(HSW_PWR_WELL_CTL_BIOS(id)) & req_mask ? 1 : 0;
  302. ret |= I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & req_mask ? 2 : 0;
  303. ret |= I915_READ(HSW_PWR_WELL_CTL_KVMR) & req_mask ? 4 : 0;
  304. ret |= I915_READ(HSW_PWR_WELL_CTL_DEBUG(id)) & req_mask ? 8 : 0;
  305. return ret;
  306. }
/*
 * Wait for the power well state bit to clear after a disable request, and
 * print a diagnostic if some other agent is holding the well on.
 */
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	/* NOTE: both 'disabled' and 'reqs' are assigned inside the condition. */
	wait_for((disabled = !(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
			       HSW_PWR_WELL_CTL_STATE(id))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, id)), 1);
	if (disabled)
		return;

	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      power_well->name,
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
/* Wait for the fuse distribution status of power gate @pg, warn on timeout. */
static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
					SKL_FUSE_PG_DIST_STATUS(pg),
					SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
/*
 * Enable a HSW+ style power well: request the well via the driver control
 * register, wait for its state bit, apply platform workarounds and wait for
 * the associated power gate fuses where required. The step order follows the
 * bspec enable sequence and must not be rearranged.
 */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	bool wait_fuses = power_well->hsw.has_fuses;
	enum skl_power_gate uninitialized_var(pg);
	u32 val;

	if (wait_fuses) {
		pg = SKL_PW_TO_PG(id);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	/* Set the request bit and wait for the well to power up. */
	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    (id == CNL_DISP_PW_AUX_B || id == CNL_DISP_PW_AUX_C ||
	     id == CNL_DISP_PW_AUX_D || id == CNL_DISP_PW_AUX_F)) {
		val = I915_READ(CNL_AUX_ANAOVRD1(id));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(CNL_AUX_ANAOVRD1(id), val);
	}

	/* Fuse state for this PW/PG is only valid after the enable. */
	if (wait_fuses)
		gen9_wait_for_power_well_fuses(dev_priv, pg);

	hsw_power_well_post_enable(dev_priv, power_well->hsw.irq_pipe_mask,
				   power_well->hsw.has_vga);
}
/*
 * Disable a HSW+ style power well: quiesce dependent interrupts, clear the
 * driver request bit and wait for the well to report disabled.
 */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	u32 val;

	hsw_power_well_pre_disable(dev_priv, power_well->hsw.irq_pipe_mask);

	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
		   val & ~HSW_PWR_WELL_CTL_REQ(id));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
  385. /*
  386. * We should only use the power well if we explicitly asked the hardware to
  387. * enable it, so check if it's enabled and also check if we've requested it to
  388. * be enabled.
  389. */
  390. static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
  391. struct i915_power_well *power_well)
  392. {
  393. enum i915_power_well_id id = power_well->id;
  394. u32 mask = HSW_PWR_WELL_CTL_REQ(id) | HSW_PWR_WELL_CTL_STATE(id);
  395. return (I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & mask) == mask;
  396. }
/* Sanity-check (via WARN_ONCE) the preconditions for entering DC9. */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id id = SKL_DISP_PW_2;

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
		  HSW_PWR_WELL_CTL_REQ(id),
		  "Power well 2 on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
/* Sanity-check (via WARN_ONCE) the preconditions for exiting DC9. */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
/*
 * Write @state to DC_STATE_EN, re-writing until the value sticks: the DMC
 * firmware has been observed to overwrite the register behind our back.
 */
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			/* Mismatch: rewrite and restart the stability count. */
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			/* Value held stable across several rereads: done. */
			break;
		}
	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}
  461. static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
  462. {
  463. u32 mask;
  464. mask = DC_STATE_EN_UPTO_DC5;
  465. if (IS_GEN9_LP(dev_priv))
  466. mask |= DC_STATE_EN_DC9;
  467. else
  468. mask |= DC_STATE_EN_UPTO_DC6;
  469. return mask;
  470. }
/*
 * Re-read the HW DC state and resynchronize the driver's software
 * tracking (csr.dc_state) with it, e.g. after the firmware/BIOS may
 * have changed it behind our back.
 */
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}
/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back when exiting
 * that state to a shallower power state (lower in number). The HW will decide
 * when to actually enter a given state on an on-demand basis, for instance
 * depending on the active state of display pipes. The state of display
 * registers backed by affected power rails are saved/restored as needed.
 *
 * Based on the above enabling a deeper DC power state is asynchronous wrt.
 * enabling it. Disabling a deeper power state is synchronous: for instance
 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 * back on and register state is restored. This is guaranteed by the MMIO write
 * to DC_STATE_EN blocking until the state is restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	/* Clamp @state to what the platform/firmware setup allows. */
	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	/* Retrying write helper; the DMC may drop plain writes. */
	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}
/*
 * Enter DC9 on broxton/geminilake: reset the panel power sequencer
 * state first (its registers are lost in DC9), then request the state.
 */
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
/*
 * Exit DC9 and re-apply the PPS register unlock workaround, whose
 * effect was lost while the state was entered.
 */
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}
/*
 * Sanity-check that the CSR/DMC firmware has been loaded by verifying
 * its program storage and base/pointer registers are non-zero.
 */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
/*
 * Sanity-check the preconditions for entering DC5: power well 2 off,
 * DC5 not already requested, a runtime-PM wakelock held and the DMC
 * firmware loaded.
 */
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}
/*
 * Request the DC5 display C-state, applying the display WA #1183
 * chicken-bit setting first on SKL/KBL/CFL.
 */
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
/*
 * Sanity-check the preconditions for entering DC6: backlight utility
 * pin off, DC6 not already requested and DMC firmware loaded.
 */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}
/*
 * Request the DC6 display C-state, applying the display WA #1183
 * chicken-bit setting first on SKL/KBL/CFL.
 */
static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
/*
 * Take ownership of a power well request the BIOS left enabled:
 * mirror the request into the driver control register (if not already
 * there) and then clear it from the BIOS control register.
 */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	u32 mask = HSW_PWR_WELL_CTL_REQ(id);
	u32 bios_req = I915_READ(HSW_PWR_WELL_CTL_BIOS(id));

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));

		if (!(drv_req & mask))
			I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), drv_req | mask);
		I915_WRITE(HSW_PWR_WELL_CTL_BIOS(id), bios_req & ~mask);
	}
}
/* Power well op: bring up the DDI PHY backing this BXT common-lane well. */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->bxt.phy);
}
/* Power well op: tear down the DDI PHY backing this BXT common-lane well. */
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->bxt.phy);
}
/* Power well op: report whether the backing DDI PHY is powered up. */
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->bxt.phy);
}
/*
 * Cross-check the state of every DDI PHY whose power well currently
 * has a reference (count > 0); GLK additionally has the CMN_C well.
 */
static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
	}
}
  624. static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
  625. struct i915_power_well *power_well)
  626. {
  627. return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
  628. }
  629. static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
  630. {
  631. u32 tmp = I915_READ(DBUF_CTL);
  632. WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
  633. (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
  634. "Unexpected DBuf power power state (0x%08x)\n", tmp);
  635. }
/*
 * Enable the "DC off" well: disable all DC states, then sanity-check
 * that the cdclk, DBuf and (on BXT/GLK) PHY state survived the DC
 * transition intact.
 */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}
/*
 * Disable the "DC off" well: re-allow the deepest DC state permitted
 * by allowed_dc_mask. A no-op if no DMC firmware is loaded, since DC
 * states need the firmware.
 */
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}
/* No-op sync_hw for platforms with nothing to take over from BIOS. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
/* No-op enable/disable for the always-on power well. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
/* The always-on power well is, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}
/*
 * i830 keeps both pipes running at all times; turn on whichever of
 * pipe A/B is not already enabled.
 */
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}
/* Disable both i830 pipes, B first (reverse of the enable order). */
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}
  685. static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
  686. struct i915_power_well *power_well)
  687. {
  688. return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
  689. I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
  690. }
/*
 * Sync the i830 pipes with the software refcount: force them on if
 * the well is referenced, off otherwise.
 */
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}
/*
 * Request a VLV/CHV power well on or off via the Punit power gate
 * control register and wait (up to 100ms) for the status register to
 * reflect the new state. Punit accesses are serialized by pcu_lock.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum i915_power_well_id power_well_id = power_well->id;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->pcu_lock);

/* True when the Punit status already reports the requested state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
/* Power well op: enable via the Punit. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
/* Power well op: disable via the Punit. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
/*
 * Query a VLV/CHV power well's state from the Punit, warning if the
 * status is neither fully on nor fully gated, or if the control
 * register disagrees with the status (someone else is poking at it).
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id power_well_id = power_well->id;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
/*
 * Program the VLV display clock gating, arbiter and rawclk registers
 * to their expected initial values.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	/* Note: masking (not OR-ing) deliberately clears every other bit. */
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);
	/* rawclk_freq is in kHz; the register wants MHz. */
	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
/*
 * Common re-init work after the VLV/CHV display power well comes up:
 * re-enable the per-pipe reference clocks, clock gating and display
 * IRQs, and (outside of driver init/resume) restore hotplug, CRT,
 * VGA and PPS state that was lost with the well.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
/*
 * Common teardown before the VLV/CHV display power well goes down:
 * disable and drain display IRQs, reset the PPS state and fall back
 * to connector polling (unless we are in late suspend).
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
/* Enable the VLV DISP2D well, then restore the display state above it. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
/* Tear down dependent display state, then gate the VLV DISP2D well. */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
/*
 * Enable the VLV DPIO common-lane power well and de-assert the PHY
 * common-lane reset, following the VLV DPIO enabling sequence.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.	The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
/*
 * Assert the PHY common-lane reset and gate the VLV DPIO common well;
 * all PLLs must already be disabled at this point.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
  882. #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
  883. static struct i915_power_well *
  884. lookup_power_well(struct drm_i915_private *dev_priv,
  885. enum i915_power_well_id power_well_id)
  886. {
  887. struct i915_power_domains *power_domains = &dev_priv->power_domains;
  888. int i;
  889. for (i = 0; i < power_domains->power_well_count; i++) {
  890. struct i915_power_well *power_well;
  891. power_well = &power_domains->power_wells[i];
  892. if (power_well->id == power_well_id)
  893. return power_well;
  894. }
  895. return NULL;
  896. }
  897. #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
/*
 * Verify that the CHV DISPLAY_PHY_STATUS register matches what we
 * expect from our cached chv_phy_control value: reconstruct the
 * expected powergood/CMN-LDO/spline-LDO bits from the lane override
 * state and wait for the HW to report them.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* Spline LDOs: lane pairs 0/1 (mask 0x3) and 2/3 (mask 0xc). */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}
  988. #undef BITS_SET
/*
 * Enable a CHV DPIO common-lane power well (PHY0 for ports B/C, PHY1
 * for port D): power it on via the Punit, poll for powergood,
 * configure the PHY's dynamic power-down behaviour over sideband and
 * finally de-assert the common-lane reset through DISPLAY_PHY_CONTROL.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	/* The pipe here selects the sideband port, not an actual pipe. */
	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
/*
 * Disable a CHV DPIO common-lane power well: verify the PLLs that use
 * it are off, assert the common-lane reset and gate the well via the
 * Punit. Once the PHY has been reset this way, its status asserts
 * become trustworthy (chv_phy_assert).
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
/*
 * Cross-check the PHY's reported lane power-down status against what
 * the requested override (@override, lane @mask) implies. Reads the
 * channel's common-lane DPIO register and compares the ALLDL/ANYDL
 * power-down bits with the expected combination.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		/* Some but not all lanes enabled: any-lane powerdown only. */
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	/* The status bits live at different shifts per channel. */
	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
/*
 * Set or clear the power-down override enable for a whole PHY channel
 * in DISPLAY_PHY_CONTROL. Returns the previous override state so the
 * caller can restore it later. Serialized by the power domains lock.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
/*
 * Program the per-lane power-down override mask for the PHY channel
 * driving @encoder, and enable/disable the override itself. The new
 * state is verified against the PHY status afterwards.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	/* Replace the old lane mask with the new one. */
	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
/*
 * Query the CHV pipe A power well state from the Punit DSPFREQ
 * register, warning about transient or unexpected states.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	/* The control field sits 16 bits below the status field. */
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
/*
 * Request the CHV pipe A power well on or off via the Punit DSPFREQ
 * register and wait (up to 100ms) for the status field to match.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->pcu_lock);

/* True when the Punit status field already shows the requested state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
/* Enable the CHV pipe A well, then restore the display state above it. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
/*
 * Disable the CHV pipe-A power well. Display deinit runs first, while the
 * well is still powered, mirroring the enable path in reverse order.
 */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
/*
 * Grab a reference on @domain: take a reference on every power well that
 * powers the domain and bump the domain use count.
 *
 * Caller must hold power_domains->lock (see intel_display_power_get()).
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
  1233. /**
  1234. * intel_display_power_get - grab a power domain reference
  1235. * @dev_priv: i915 device instance
  1236. * @domain: power domain to reference
  1237. *
  1238. * This function grabs a power domain reference for @domain and ensures that the
  1239. * power domain and all its parents are powered up. Therefore users should only
  1240. * grab a reference to the innermost power domain they need.
  1241. *
  1242. * Any power domain reference obtained by this function must have a symmetric
  1243. * call to intel_display_power_put() to release the reference again.
  1244. */
  1245. void intel_display_power_get(struct drm_i915_private *dev_priv,
  1246. enum intel_display_power_domain domain)
  1247. {
  1248. struct i915_power_domains *power_domains = &dev_priv->power_domains;
  1249. intel_runtime_pm_get(dev_priv);
  1250. mutex_lock(&power_domains->lock);
  1251. __intel_display_power_get_domain(dev_priv, domain);
  1252. mutex_unlock(&power_domains->lock);
  1253. }
  1254. /**
  1255. * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
  1256. * @dev_priv: i915 device instance
  1257. * @domain: power domain to reference
  1258. *
  1259. * This function grabs a power domain reference for @domain and ensures that the
  1260. * power domain and all its parents are powered up. Therefore users should only
  1261. * grab a reference to the innermost power domain they need.
  1262. *
  1263. * Any power domain reference obtained by this function must have a symmetric
  1264. * call to intel_display_power_put() to release the reference again.
  1265. */
  1266. bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
  1267. enum intel_display_power_domain domain)
  1268. {
  1269. struct i915_power_domains *power_domains = &dev_priv->power_domains;
  1270. bool is_enabled;
  1271. if (!intel_runtime_pm_get_if_in_use(dev_priv))
  1272. return false;
  1273. mutex_lock(&power_domains->lock);
  1274. if (__intel_display_power_is_enabled(dev_priv, domain)) {
  1275. __intel_display_power_get_domain(dev_priv, domain);
  1276. is_enabled = true;
  1277. } else {
  1278. is_enabled = false;
  1279. }
  1280. mutex_unlock(&power_domains->lock);
  1281. if (!is_enabled)
  1282. intel_runtime_pm_put(dev_priv);
  1283. return is_enabled;
  1284. }
  1285. /**
  1286. * intel_display_power_put - release a power domain reference
  1287. * @dev_priv: i915 device instance
  1288. * @domain: power domain to reference
  1289. *
  1290. * This function drops the power domain reference obtained by
  1291. * intel_display_power_get() and might power down the corresponding hardware
  1292. * block right away if this is the last reference.
  1293. */
  1294. void intel_display_power_put(struct drm_i915_private *dev_priv,
  1295. enum intel_display_power_domain domain)
  1296. {
  1297. struct i915_power_domains *power_domains;
  1298. struct i915_power_well *power_well;
  1299. power_domains = &dev_priv->power_domains;
  1300. mutex_lock(&power_domains->lock);
  1301. WARN(!power_domains->domain_use_count[domain],
  1302. "Use count on domain %s is already zero\n",
  1303. intel_display_power_domain_str(domain));
  1304. power_domains->domain_use_count[domain]--;
  1305. for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
  1306. intel_power_well_put(dev_priv, power_well);
  1307. mutex_unlock(&power_domains->lock);
  1308. intel_runtime_pm_put(dev_priv);
  1309. }
/*
 * Per-platform power domain masks. Each mask is the set of display power
 * domains (as BIT_ULL() bits) powered by a given power well; they populate
 * the .domains fields of the power well tables below.
 */

/* I830 */
#define I830_PIPES_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/* VLV */
#define VLV_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/* CHV */
#define CHV_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/* HSW/BDW */
#define HSW_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/* SKL */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/* BXT */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BXT_DPIO_CMN_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BXT_DPIO_CMN_BC_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/* GLK */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))

#define GLK_DPIO_CMN_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define GLK_DPIO_CMN_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define GLK_DPIO_CMN_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/* CNL */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Ops for the always-on well: enable/disable are no-ops, always reports on */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* Ops for the CHV pipe-A (disp2d) well, driven via the Punit */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* Ops for the CHV DPIO common lane wells */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
/* Power well list for platforms with no controllable display power wells */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
};
/* Ops for the I830 pipes well */
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};

/* I830 power wells: the always-on well plus a single well for both pipes */
static struct i915_power_well i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = I830_DISP_PW_PIPES,
	},
};
/* Generic HSW+ power well ops (also used by gen9+ wells) */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* Ops for the gen9 "DC off" well: enabled means DC states are disallowed */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

/* Ops for the BXT/GLK DPIO common (PHY) wells */
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
/* HSW power wells: always-on plus the single global display well */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.has_vga = true,
		},
	},
};

/* BDW power wells: as HSW, but pipe B/C interrupts live in the display well */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};
/* Ops for the VLV display (disp2d) well */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Ops for the VLV DPIO common lane well */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Generic Punit-controlled VLV well ops (DPIO TX lane wells) */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
/*
 * VLV power wells. Note all four dpio-tx wells carry the union of all TX
 * lane domains: any lane use keeps all TX lane wells powered.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
/* CHV power wells */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.id = CHV_DISP_PW_PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};
  1804. bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
  1805. enum i915_power_well_id power_well_id)
  1806. {
  1807. struct i915_power_well *power_well;
  1808. bool ret;
  1809. power_well = lookup_power_well(dev_priv, power_well_id);
  1810. ret = power_well->ops->is_enabled(dev_priv, power_well);
  1811. return ret;
  1812. }
/* SKL power wells */
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
};
/* BXT power wells */
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* .domains == 0: handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};
/* GLK power wells */
static struct i915_power_well glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_AUX_C,
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
};
/* CNL power wells */
static struct i915_power_well cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_C,
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_D,
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
	{
		.name = "DDI F IO power well",
		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_DDI_F,
	},
	{
		.name = "AUX F",
		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_F,
	},
};
  2128. static int
  2129. sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
  2130. int disable_power_well)
  2131. {
  2132. if (disable_power_well >= 0)
  2133. return !!disable_power_well;
  2134. return 1;
  2135. }
  2136. static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
  2137. int enable_dc)
  2138. {
  2139. uint32_t mask;
  2140. int requested_dc;
  2141. int max_dc;
  2142. if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
  2143. max_dc = 2;
  2144. mask = 0;
  2145. } else if (IS_GEN9_LP(dev_priv)) {
  2146. max_dc = 1;
  2147. /*
  2148. * DC9 has a separate HW flow from the rest of the DC states,
  2149. * not depending on the DMC firmware. It's needed by system
  2150. * suspend/resume, so allow it unconditionally.
  2151. */
  2152. mask = DC_STATE_EN_DC9;
  2153. } else {
  2154. max_dc = 0;
  2155. mask = 0;
  2156. }
  2157. if (!i915_modparams.disable_power_well)
  2158. max_dc = 0;
  2159. if (enable_dc >= 0 && enable_dc <= max_dc) {
  2160. requested_dc = enable_dc;
  2161. } else if (enable_dc == -1) {
  2162. requested_dc = max_dc;
  2163. } else if (enable_dc > max_dc && enable_dc <= 2) {
  2164. DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
  2165. enable_dc, max_dc);
  2166. requested_dc = max_dc;
  2167. } else {
  2168. DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
  2169. requested_dc = max_dc;
  2170. }
  2171. if (requested_dc > 1)
  2172. mask |= DC_STATE_EN_UPTO_DC6;
  2173. if (requested_dc > 0)
  2174. mask |= DC_STATE_EN_UPTO_DC5;
  2175. DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
  2176. return mask;
  2177. }
  2178. static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv)
  2179. {
  2180. struct i915_power_domains *power_domains = &dev_priv->power_domains;
  2181. u64 power_well_ids;
  2182. int i;
  2183. power_well_ids = 0;
  2184. for (i = 0; i < power_domains->power_well_count; i++) {
  2185. enum i915_power_well_id id = power_domains->power_wells[i].id;
  2186. WARN_ON(id >= sizeof(power_well_ids) * 8);
  2187. WARN_ON(power_well_ids & BIT_ULL(id));
  2188. power_well_ids |= BIT_ULL(id);
  2189. }
  2190. }
/*
 * Point @power_domains at the platform's power well table and record its
 * length. Note: __power_wells must be an actual array (ARRAY_SIZE), never
 * a pointer.
 */
#define set_power_wells(power_domains, __power_wells) ({ \
	(power_domains)->power_wells = (__power_wells); \
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
})
  2195. /**
  2196. * intel_power_domains_init - initializes the power domain structures
  2197. * @dev_priv: i915 device instance
  2198. *
  2199. * Initializes the power domain structures for @dev_priv depending upon the
  2200. * supported platform.
  2201. */
  2202. int intel_power_domains_init(struct drm_i915_private *dev_priv)
  2203. {
  2204. struct i915_power_domains *power_domains = &dev_priv->power_domains;
  2205. i915_modparams.disable_power_well =
  2206. sanitize_disable_power_well_option(dev_priv,
  2207. i915_modparams.disable_power_well);
  2208. dev_priv->csr.allowed_dc_mask =
  2209. get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
  2210. BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
  2211. mutex_init(&power_domains->lock);
  2212. /*
  2213. * The enabling order will be from lower to higher indexed wells,
  2214. * the disabling order is reversed.
  2215. */
  2216. if (IS_HASWELL(dev_priv)) {
  2217. set_power_wells(power_domains, hsw_power_wells);
  2218. } else if (IS_BROADWELL(dev_priv)) {
  2219. set_power_wells(power_domains, bdw_power_wells);
  2220. } else if (IS_GEN9_BC(dev_priv)) {
  2221. set_power_wells(power_domains, skl_power_wells);
  2222. } else if (IS_CANNONLAKE(dev_priv)) {
  2223. set_power_wells(power_domains, cnl_power_wells);
  2224. /*
  2225. * DDI and Aux IO are getting enabled for all ports
  2226. * regardless the presence or use. So, in order to avoid
  2227. * timeouts, lets remove them from the list
  2228. * for the SKUs without port F.
  2229. */
  2230. if (!IS_CNL_WITH_PORT_F(dev_priv))
  2231. power_domains->power_well_count -= 2;
  2232. } else if (IS_BROXTON(dev_priv)) {
  2233. set_power_wells(power_domains, bxt_power_wells);
  2234. } else if (IS_GEMINILAKE(dev_priv)) {
  2235. set_power_wells(power_domains, glk_power_wells);
  2236. } else if (IS_CHERRYVIEW(dev_priv)) {
  2237. set_power_wells(power_domains, chv_power_wells);
  2238. } else if (IS_VALLEYVIEW(dev_priv)) {
  2239. set_power_wells(power_domains, vlv_power_wells);
  2240. } else if (IS_I830(dev_priv)) {
  2241. set_power_wells(power_domains, i830_power_wells);
  2242. } else {
  2243. set_power_wells(power_domains, i9xx_always_on_power_well);
  2244. }
  2245. assert_power_well_ids_unique(dev_priv);
  2246. return 0;
  2247. }
  2248. /**
  2249. * intel_power_domains_fini - finalizes the power domain structures
  2250. * @dev_priv: i915 device instance
  2251. *
  2252. * Finalizes the power domain structures for @dev_priv depending upon the
  2253. * supported platform. This function also disables runtime pm and ensures that
  2254. * the device stays powered up so that the driver can be reloaded.
  2255. */
  2256. void intel_power_domains_fini(struct drm_i915_private *dev_priv)
  2257. {
  2258. struct device *kdev = &dev_priv->drm.pdev->dev;
  2259. /*
  2260. * The i915.ko module is still not prepared to be loaded when
  2261. * the power well is not enabled, so just enable it in case
  2262. * we're going to unload/reload.
  2263. * The following also reacquires the RPM reference the core passed
  2264. * to the driver during loading, which is dropped in
  2265. * intel_runtime_pm_enable(). We have to hand back the control of the
  2266. * device to the core with this reference held.
  2267. */
  2268. intel_display_set_init_power(dev_priv, true);
  2269. /* Remove the refcount we took to keep power well support disabled. */
  2270. if (!i915_modparams.disable_power_well)
  2271. intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
  2272. /*
  2273. * Remove the refcount we took in intel_runtime_pm_enable() in case
  2274. * the platform doesn't support runtime PM.
  2275. */
  2276. if (!HAS_RUNTIME_PM(dev_priv))
  2277. pm_runtime_put(kdev);
  2278. }
/*
 * Synchronize each power well's software request with the hardware state
 * and cache the resulting enabled status, under the power domains lock.
 */
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well) {
		power_well->ops->sync_hw(dev_priv, power_well);
		/* Cache the HW-reported state for later mismatch checks. */
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}
  2291. static inline
  2292. bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
  2293. i915_reg_t reg, bool enable)
  2294. {
  2295. u32 val, status;
  2296. val = I915_READ(reg);
  2297. val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
  2298. I915_WRITE(reg, val);
  2299. POSTING_READ(reg);
  2300. udelay(10);
  2301. status = I915_READ(reg) & DBUF_POWER_STATE;
  2302. if ((enable && !status) || (!enable && status)) {
  2303. DRM_ERROR("DBus power %s timeout!\n",
  2304. enable ? "enable" : "disable");
  2305. return false;
  2306. }
  2307. return true;
  2308. }
/* Power up the single gen9 DBuf slice. */
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
}
/* Power down the single gen9 DBuf slice. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
}
  2317. static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
  2318. {
  2319. if (INTEL_GEN(dev_priv) < 11)
  2320. return 1;
  2321. return 2;
  2322. }
  2323. void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
  2324. u8 req_slices)
  2325. {
  2326. u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
  2327. u32 val;
  2328. bool ret;
  2329. if (req_slices > intel_dbuf_max_slices(dev_priv)) {
  2330. DRM_ERROR("Invalid number of dbuf slices requested\n");
  2331. return;
  2332. }
  2333. if (req_slices == hw_enabled_slices || req_slices == 0)
  2334. return;
  2335. val = I915_READ(DBUF_CTL_S2);
  2336. if (req_slices > hw_enabled_slices)
  2337. ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
  2338. else
  2339. ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
  2340. if (ret)
  2341. dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
  2342. }
/*
 * Power up both ICL DBuf slices and update the cached slice count on
 * success. Both requests are posted before the single 10us settle delay.
 */
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	udelay(10);

	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
	else
		dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
}
/*
 * Power down both ICL DBuf slices and update the cached slice count on
 * success. Mirror image of icl_dbuf_enable().
 */
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	udelay(10);

	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power disable timeout!\n");
	else
		dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
}
  2367. static void icl_mbus_init(struct drm_i915_private *dev_priv)
  2368. {
  2369. uint32_t val;
  2370. val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
  2371. MBUS_ABOX_BT_CREDIT_POOL2(16) |
  2372. MBUS_ABOX_B_CREDIT(1) |
  2373. MBUS_ABOX_BW_CREDIT(1);
  2374. I915_WRITE(MBUS_ABOX_CTL, val);
  2375. }
/*
 * Bring up the SKL display core: disable DC states, enable the PCH reset
 * handshake, PG1 and Misc I/O power wells, CDCLK and DBuf, and reload the
 * DMC firmware when resuming.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* Re-program the DMC firmware, which is lost over suspend. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
/*
 * Tear down the SKL display core in the reverse order of
 * skl_display_core_init().
 */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
/*
 * Bring up the BXT/GLK display core: disable DC states, clear the PCH
 * reset handshake (no PCH on BXT), enable PG1, CDCLK and DBuf, and reload
 * the DMC firmware when resuming.
 */
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* Re-program the DMC firmware, which is lost over suspend. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
/*
 * Tear down the BXT/GLK display core in the reverse order of
 * bxt_display_core_init().
 */
void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
/*
 * Indices into cnl_procmon_values[]: one entry per PHY voltage level
 * (0.85V / 0.95V / 1.05V) and process "dot" revision combination, as
 * reported by the PORT_COMP_DW3 voltage/process info fields.
 */
enum {
	PROCMON_0_85V_DOT_0,
	PROCMON_0_95V_DOT_0,
	PROCMON_0_95V_DOT_1,
	PROCMON_1_05V_DOT_0,
	PROCMON_1_05V_DOT_1,
};
/*
 * Process/voltage compensation register values written to the
 * PORT_COMP_DW1/DW9/DW10 registers by cnl_set_procmon_ref_values(),
 * indexed by the PROCMON_* enum above.
 */
static const struct cnl_procmon {
	u32 dw1, dw9, dw10;
} cnl_procmon_values[] = {
	[PROCMON_0_85V_DOT_0] =
		{ .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
	[PROCMON_0_95V_DOT_0] =
		{ .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
	[PROCMON_0_95V_DOT_1] =
		{ .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
	[PROCMON_1_05V_DOT_0] =
		{ .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
	[PROCMON_1_05V_DOT_1] =
		{ .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
};
  2485. /*
  2486. * CNL has just one set of registers, while ICL has two sets: one for port A and
  2487. * the other for port B. The CNL registers are equivalent to the ICL port A
  2488. * registers, that's why we call the ICL macros even though the function has CNL
  2489. * on its name.
  2490. */
  2491. static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
  2492. enum port port)
  2493. {
  2494. const struct cnl_procmon *procmon;
  2495. u32 val;
  2496. val = I915_READ(ICL_PORT_COMP_DW3(port));
  2497. switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
  2498. default:
  2499. MISSING_CASE(val);
  2500. case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
  2501. procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
  2502. break;
  2503. case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
  2504. procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
  2505. break;
  2506. case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
  2507. procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
  2508. break;
  2509. case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
  2510. procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
  2511. break;
  2512. case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
  2513. procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
  2514. break;
  2515. }
  2516. val = I915_READ(ICL_PORT_COMP_DW1(port));
  2517. val &= ~((0xff << 16) | 0xff);
  2518. val |= procmon->dw1;
  2519. I915_WRITE(ICL_PORT_COMP_DW1(port), val);
  2520. I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
  2521. I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
  2522. }
/*
 * Bring up the CNL display core following the numbered BSpec sequence:
 * PCH handshake, comp init, PG1, CDCLK, DBuf, then DMC reload on resume.
 */
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val |= RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* 2. Enable Comp */
	val = I915_READ(CHICKEN_MISC_2);
	val &= ~CNL_COMP_PWR_DOWN;
	I915_WRITE(CHICKEN_MISC_2, val);

	/* Dummy PORT_A to get the correct CNL register from the ICL macro */
	cnl_set_procmon_ref_values(dev_priv, PORT_A);

	val = I915_READ(CNL_PORT_COMP_DW0);
	val |= COMP_INIT;
	I915_WRITE(CNL_PORT_COMP_DW0, val);

	/* 3. */
	val = I915_READ(CNL_PORT_CL1CM_DW5);
	val |= CL_POWER_DOWN_ENABLE;
	I915_WRITE(CNL_PORT_CL1CM_DW5, val);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	cnl_init_cdclk(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);

	/* Re-program the DMC firmware, which is lost over suspend. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
/*
 * Tear down the CNL display core in the reverse order of
 * cnl_display_core_init().
 */
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	cnl_uninit_cdclk(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */

	/* 5. Disable Comp */
	val = I915_READ(CHICKEN_MISC_2);
	val |= CNL_COMP_PWR_DOWN;
	I915_WRITE(CHICKEN_MISC_2, val);
}
/*
 * Bring up the ICL display core following the numbered BSpec sequence.
 * Power well support is not wired up yet (see FIXME at step 4).
 */
static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	enum port port;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val |= RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Only the two combo PHY ports (A and B) are initialized here. */
	for (port = PORT_A; port <= PORT_B; port++) {
		/* 2. Enable DDI combo PHY comp. */
		val = I915_READ(ICL_PHY_MISC(port));
		val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
		I915_WRITE(ICL_PHY_MISC(port), val);

		cnl_set_procmon_ref_values(dev_priv, port);

		val = I915_READ(ICL_PORT_COMP_DW0(port));
		val |= COMP_INIT;
		I915_WRITE(ICL_PORT_COMP_DW0(port), val);

		/* 3. Set power down enable. */
		val = I915_READ(ICL_PORT_CL_DW5(port));
		val |= CL_POWER_DOWN_ENABLE;
		I915_WRITE(ICL_PORT_CL_DW5(port), val);
	}

	/* 4. Enable power well 1 (PG1) and aux IO power. */
	/* FIXME: ICL power wells code not here yet. */

	/* 5. Enable CDCLK. */
	icl_init_cdclk(dev_priv);

	/* 6. Enable DBUF. */
	icl_dbuf_enable(dev_priv);

	/* 7. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 8. CHICKEN_DCPR_1 */
	I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
					CNL_DDI_CLOCK_REG_ACCESS_ON);
}
/*
 * Tear down the ICL display core in the reverse order of
 * icl_display_core_init().
 */
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	enum port port;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	icl_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	icl_uninit_cdclk(dev_priv);

	/* 4. Disable Power Well 1 (PG1) and Aux IO Power */
	/* FIXME: ICL power wells code not here yet. */

	/* 5. Disable Comp */
	for (port = PORT_A; port <= PORT_B; port++) {
		val = I915_READ(ICL_PHY_MISC(port));
		val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
		I915_WRITE(ICL_PHY_MISC(port), val);
	}
}
/*
 * Reconstruct the CHV DISPLAY_PHY_CONTROL shadow value from the current
 * power well and lane status, since the register itself must never be
 * read (see comment below).
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		/* PHY0/CH0 (ports B) lane status. */
		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		/* PHY0/CH1 (port C) lane status. */
		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		/* PHY1/CH0 (port D) lane status. */
		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
/*
 * VLV common lane workaround: toggle the display PHY side reset by gating
 * and ungating the common lane power well, unless the display looks like
 * it is already active (in which case toggling would disturb it).
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
  2735. /**
  2736. * intel_power_domains_init_hw - initialize hardware power domain state
  2737. * @dev_priv: i915 device instance
  2738. * @resume: Called from resume code paths or not
  2739. *
  2740. * This function initializes the hardware power domain state and enables all
  2741. * power wells belonging to the INIT power domain. Power wells in other
  2742. * domains (and not in the INIT domain) are referenced or disabled during the
  2743. * modeset state HW readout. After that the reference count of each power well
  2744. * must match its HW enabled state, see intel_power_domains_verify_state().
  2745. */
  2746. void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
  2747. {
  2748. struct i915_power_domains *power_domains = &dev_priv->power_domains;
  2749. power_domains->initializing = true;
  2750. if (IS_ICELAKE(dev_priv)) {
  2751. icl_display_core_init(dev_priv, resume);
  2752. } else if (IS_CANNONLAKE(dev_priv)) {
  2753. cnl_display_core_init(dev_priv, resume);
  2754. } else if (IS_GEN9_BC(dev_priv)) {
  2755. skl_display_core_init(dev_priv, resume);
  2756. } else if (IS_GEN9_LP(dev_priv)) {
  2757. bxt_display_core_init(dev_priv, resume);
  2758. } else if (IS_CHERRYVIEW(dev_priv)) {
  2759. mutex_lock(&power_domains->lock);
  2760. chv_phy_control_init(dev_priv);
  2761. mutex_unlock(&power_domains->lock);
  2762. } else if (IS_VALLEYVIEW(dev_priv)) {
  2763. mutex_lock(&power_domains->lock);
  2764. vlv_cmnlane_wa(dev_priv);
  2765. mutex_unlock(&power_domains->lock);
  2766. }
  2767. /* For now, we need the power well to be always enabled. */
  2768. intel_display_set_init_power(dev_priv, true);
  2769. /* Disable power support if the user asked so. */
  2770. if (!i915_modparams.disable_power_well)
  2771. intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
  2772. intel_power_domains_sync_hw(dev_priv);
  2773. power_domains->initializing = false;
  2774. }
  2775. /**
  2776. * intel_power_domains_suspend - suspend power domain state
  2777. * @dev_priv: i915 device instance
  2778. *
  2779. * This function prepares the hardware power domain state before entering
  2780. * system suspend. It must be paired with intel_power_domains_init_hw().
  2781. */
  2782. void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
  2783. {
  2784. /*
  2785. * Even if power well support was disabled we still want to disable
  2786. * power wells while we are system suspended.
  2787. */
  2788. if (!i915_modparams.disable_power_well)
  2789. intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
  2790. if (IS_ICELAKE(dev_priv))
  2791. icl_display_core_uninit(dev_priv);
  2792. else if (IS_CANNONLAKE(dev_priv))
  2793. cnl_display_core_uninit(dev_priv);
  2794. else if (IS_GEN9_BC(dev_priv))
  2795. skl_display_core_uninit(dev_priv);
  2796. else if (IS_GEN9_LP(dev_priv))
  2797. bxt_display_core_uninit(dev_priv);
  2798. }
/*
 * Dump every power well's refcount and the use count of each domain it
 * serves, as debug output. Used when verify_state detects a mismatch.
 */
static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well) {
		enum intel_display_power_domain domain;

		DRM_DEBUG_DRIVER("%-25s %d\n",
				 power_well->name, power_well->count);

		for_each_power_domain(domain, power_well->domains)
			DRM_DEBUG_DRIVER("  %-23s %d\n",
					 intel_display_power_domain_str(domain),
					 power_domains->domain_use_count[domain]);
	}
}
  2813. /**
  2814. * intel_power_domains_verify_state - verify the HW/SW state for all power wells
  2815. * @dev_priv: i915 device instance
  2816. *
  2817. * Verify if the reference count of each power well matches its HW enabled
  2818. * state and the total refcount of the domains it belongs to. This must be
  2819. * called after modeset HW state sanitization, which is responsible for
  2820. * acquiring reference counts for any power wells in use and disabling the
  2821. * ones left on by BIOS but not required by any active output.
  2822. */
  2823. void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
  2824. {
  2825. struct i915_power_domains *power_domains = &dev_priv->power_domains;
  2826. struct i915_power_well *power_well;
  2827. bool dump_domain_info;
  2828. mutex_lock(&power_domains->lock);
  2829. dump_domain_info = false;
  2830. for_each_power_well(dev_priv, power_well) {
  2831. enum intel_display_power_domain domain;
  2832. int domains_count;
  2833. bool enabled;
  2834. /*
  2835. * Power wells not belonging to any domain (like the MISC_IO
  2836. * and PW1 power wells) are under FW control, so ignore them,
  2837. * since their state can change asynchronously.
  2838. */
  2839. if (!power_well->domains)
  2840. continue;
  2841. enabled = power_well->ops->is_enabled(dev_priv, power_well);
  2842. if ((power_well->count || power_well->always_on) != enabled)
  2843. DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
  2844. power_well->name, power_well->count, enabled);
  2845. domains_count = 0;
  2846. for_each_power_domain(domain, power_well->domains)
  2847. domains_count += power_domains->domain_use_count[domain];
  2848. if (power_well->count != domains_count) {
  2849. DRM_ERROR("power well %s refcount/domain refcount mismatch "
  2850. "(refcount %d/domains refcount %d)\n",
  2851. power_well->name, power_well->count,
  2852. domains_count);
  2853. dump_domain_info = true;
  2854. }
  2855. }
  2856. if (dump_domain_info) {
  2857. static bool dumped;
  2858. if (!dumped) {
  2859. intel_power_domains_dump_info(dev_priv);
  2860. dumped = true;
  2861. }
  2862. }
  2863. mutex_unlock(&power_domains->lock);
  2864. }
  2865. /**
  2866. * intel_runtime_pm_get - grab a runtime pm reference
  2867. * @dev_priv: i915 device instance
  2868. *
  2869. * This function grabs a device-level runtime pm reference (mostly used for GEM
  2870. * code to ensure the GTT or GT is on) and ensures that it is powered up.
  2871. *
  2872. * Any runtime pm reference obtained by this function must have a symmetric
  2873. * call to intel_runtime_pm_put() to release the reference again.
  2874. */
  2875. void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
  2876. {
  2877. struct pci_dev *pdev = dev_priv->drm.pdev;
  2878. struct device *kdev = &pdev->dev;
  2879. int ret;
  2880. ret = pm_runtime_get_sync(kdev);
  2881. WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
  2882. atomic_inc(&dev_priv->runtime_pm.wakeref_count);
  2883. assert_rpm_wakelock_held(dev_priv);
  2884. }
  2885. /**
  2886. * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
  2887. * @dev_priv: i915 device instance
  2888. *
  2889. * This function grabs a device-level runtime pm reference if the device is
  2890. * already in use and ensures that it is powered up. It is illegal to try
  2891. * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
  2892. *
  2893. * Any runtime pm reference obtained by this function must have a symmetric
  2894. * call to intel_runtime_pm_put() to release the reference again.
  2895. *
  2896. * Returns: True if the wakeref was acquired, or False otherwise.
  2897. */
  2898. bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
  2899. {
  2900. if (IS_ENABLED(CONFIG_PM)) {
  2901. struct pci_dev *pdev = dev_priv->drm.pdev;
  2902. struct device *kdev = &pdev->dev;
  2903. /*
  2904. * In cases runtime PM is disabled by the RPM core and we get
  2905. * an -EINVAL return value we are not supposed to call this
  2906. * function, since the power state is undefined. This applies
  2907. * atm to the late/early system suspend/resume handlers.
  2908. */
  2909. if (pm_runtime_get_if_in_use(kdev) <= 0)
  2910. return false;
  2911. }
  2912. atomic_inc(&dev_priv->runtime_pm.wakeref_count);
  2913. assert_rpm_wakelock_held(dev_priv);
  2914. return true;
  2915. }
  2916. /**
  2917. * intel_runtime_pm_get_noresume - grab a runtime pm reference
  2918. * @dev_priv: i915 device instance
  2919. *
  2920. * This function grabs a device-level runtime pm reference (mostly used for GEM
  2921. * code to ensure the GTT or GT is on).
  2922. *
  2923. * It will _not_ power up the device but instead only check that it's powered
  2924. * on. Therefore it is only valid to call this functions from contexts where
  2925. * the device is known to be powered up and where trying to power it up would
  2926. * result in hilarity and deadlocks. That pretty much means only the system
  2927. * suspend/resume code where this is used to grab runtime pm references for
  2928. * delayed setup down in work items.
  2929. *
  2930. * Any runtime pm reference obtained by this function must have a symmetric
  2931. * call to intel_runtime_pm_put() to release the reference again.
  2932. */
  2933. void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
  2934. {
  2935. struct pci_dev *pdev = dev_priv->drm.pdev;
  2936. struct device *kdev = &pdev->dev;
  2937. assert_rpm_wakelock_held(dev_priv);
  2938. pm_runtime_get_noresume(kdev);
  2939. atomic_inc(&dev_priv->runtime_pm.wakeref_count);
  2940. }
  2941. /**
  2942. * intel_runtime_pm_put - release a runtime pm reference
  2943. * @dev_priv: i915 device instance
  2944. *
  2945. * This function drops the device-level runtime pm reference obtained by
  2946. * intel_runtime_pm_get() and might power down the corresponding
  2947. * hardware block right away if this is the last reference.
  2948. */
  2949. void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
  2950. {
  2951. struct pci_dev *pdev = dev_priv->drm.pdev;
  2952. struct device *kdev = &pdev->dev;
  2953. assert_rpm_wakelock_held(dev_priv);
  2954. atomic_dec(&dev_priv->runtime_pm.wakeref_count);
  2955. pm_runtime_mark_last_busy(kdev);
  2956. pm_runtime_put_autosuspend(kdev);
  2957. }
  2958. /**
  2959. * intel_runtime_pm_enable - enable runtime pm
  2960. * @dev_priv: i915 device instance
  2961. *
  2962. * This function enables runtime pm at the end of the driver load sequence.
  2963. *
  2964. * Note that this function does currently not enable runtime pm for the
  2965. * subordinate display power domains. That is only done on the first modeset
  2966. * using intel_display_set_init_power().
  2967. */
  2968. void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
  2969. {
  2970. struct pci_dev *pdev = dev_priv->drm.pdev;
  2971. struct device *kdev = &pdev->dev;
  2972. pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
  2973. pm_runtime_mark_last_busy(kdev);
  2974. /*
  2975. * Take a permanent reference to disable the RPM functionality and drop
  2976. * it only when unloading the driver. Use the low level get/put helpers,
  2977. * so the driver's own RPM reference tracking asserts also work on
  2978. * platforms without RPM support.
  2979. */
  2980. if (!HAS_RUNTIME_PM(dev_priv)) {
  2981. int ret;
  2982. pm_runtime_dont_use_autosuspend(kdev);
  2983. ret = pm_runtime_get_sync(kdev);
  2984. WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
  2985. } else {
  2986. pm_runtime_use_autosuspend(kdev);
  2987. }
  2988. /*
  2989. * The core calls the driver load handler with an RPM reference held.
  2990. * We drop that here and will reacquire it during unloading in
  2991. * intel_power_domains_fini().
  2992. */
  2993. pm_runtime_put_autosuspend(kdev);
  2994. }