intel_runtime_pm.c

/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
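
/*
 * A minimal sketch of the usage pattern this file exposes to the rest of
 * the driver (error handling and the surrounding driver context omitted;
 * see intel_display_power_get()/intel_display_power_put(), used further
 * down in this file):
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... pipe A registers can be accessed safely here ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 */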
#define for_each_power_well(i, power_well, domain_mask, power_domains) \
	for (i = 0; \
	     i < (power_domains)->power_well_count && \
	     ((power_well) = &(power_domains)->power_wells[i]); \
	     i++) \
		for_each_if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1; \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]); \
	     i--) \
		for_each_if ((power_well)->domains & (domain_mask))
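
/*
 * Sketch of how these iterators are used (see
 * __intel_display_power_is_enabled() below for a real caller):
 *
 *	struct i915_power_well *power_well;
 *	int i;
 *
 *	for_each_power_well(i, power_well, BIT(domain), power_domains)
 *		... body runs only for wells whose domain mask matches ...
 */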
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id);

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}
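
/*
 * Sketch of the refcounting contract above: the hardware is only touched
 * on the 0 -> 1 and 1 -> 0 transitions, so nested get/put pairs are cheap:
 *
 *	intel_power_well_get(dev_priv, pw);	0 -> 1: enables the well
 *	intel_power_well_get(dev_priv, pw);	1 -> 2: no hw access
 *	intel_power_well_put(dev_priv, pw);	2 -> 1: no hw access
 *	intel_power_well_put(dev_priv, pw);	1 -> 0: disables the well
 */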
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
	       (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
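
/*
 * Sketch of the intended readout-time usage (hypothetical caller; the real
 * users are the various hardware state readout functions):
 *
 *	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		return false;	... pipe registers would read garbage ...
 *	... read out the pipe A hardware state ...
 */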
/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled, runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}
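
/*
 * Sketch of the intended call sites (driver load/resume paths, simplified
 * and hypothetical):
 *
 *	intel_display_set_init_power(dev_priv, true);
 *	... bring up / read out hw state with all domains powered ...
 *	intel_display_set_init_power(dev_priv, false);
 */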
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled (BIOS, DRIVER, KVMR and DEBUG), and it will only be disabled
 * if none of the registers is requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So we touch the VGA MSR register here, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So we touch the VGA MSR register here, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->id == SKL_DISP_PW_2) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}
}

static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	if (power_well->id == SKL_DISP_PW_2)
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (intel_wait_for_register(dev_priv,
						    HSW_PWR_WELL_DRIVER,
						    HSW_PWR_WELL_STATE_ENABLED,
						    HSW_PWR_WELL_STATE_ENABLED,
						    20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}
	} else {
		if (enable_requested) {
			hsw_power_well_pre_disable(dev_priv);
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_TRANSCODER_A) | \
	BIT(POWER_DOMAIN_PIPE_B) | \
	BIT(POWER_DOMAIN_TRANSCODER_B) | \
	BIT(POWER_DOMAIN_PIPE_C) | \
	BIT(POWER_DOMAIN_TRANSCODER_C) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_AUX_D) | \
	BIT(POWER_DOMAIN_AUDIO) | \
	BIT(POWER_DOMAIN_VGA) | \
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT(POWER_DOMAIN_MODESET) | \
	BIT(POWER_DOMAIN_AUX_A) | \
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_TRANSCODER_A) | \
	BIT(POWER_DOMAIN_PIPE_B) | \
	BIT(POWER_DOMAIN_TRANSCODER_B) | \
	BIT(POWER_DOMAIN_PIPE_C) | \
	BIT(POWER_DOMAIN_TRANSCODER_C) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_AUDIO) | \
	BIT(POWER_DOMAIN_VGA) | \
	BIT(POWER_DOMAIN_GMBUS) | \
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT(POWER_DOMAIN_MODESET) | \
	BIT(POWER_DOMAIN_AUX_A) | \
	BIT(POWER_DOMAIN_INIT))

#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
	BIT(POWER_DOMAIN_AUX_A) | \
	BIT(POWER_DOMAIN_INIT))

#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_INIT))
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if the mode set
	 *    disable sequence was followed.
	 * 2] Check if the display uninitialize sequence was initiated.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if the mode
	 *    set disable sequence was followed.
	 * 2] Check if the display uninitialize sequence was initiated.
	 */
}
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/*
	 * It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and the DMC keeps returning the old value. Make sure
	 * the write really sticks by re-reading it enough times, and force a
	 * rewrite until we are confident that the state is exactly what we
	 * want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the time we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}
static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}

static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}
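
/*
 * Sketch: all DC state transitions below funnel through this helper, e.g.
 * (see bxt_enable_dc9() and skl_enable_dc6() further down; requests beyond
 * csr.allowed_dc_mask are clamped by the WARN_ON_ONCE above):
 *
 *	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);	allow DC5/DC6
 *	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);		disallow again
 */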
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							       SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}

void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->id;
	u32 val;
	u32 mask;

	mask = SKL_POWER_WELL_REQ(power_well_id);

	val = I915_READ(HSW_PWR_WELL_KVMR);
	if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
		      power_well->name))
		I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);

	val = I915_READ(HSW_PWR_WELL_BIOS);
	val |= I915_READ(HSW_PWR_WELL_DEBUG);

	if (!(val & mask))
		return;

	/*
	 * DMC is known to force on the request bits for power well 1 on SKL
	 * and BXT and the misc IO power well on SKL but we don't expect any
	 * other request bits to be set, so WARN for those.
	 */
	if (power_well_id == SKL_DISP_PW_1 ||
	    ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
	     power_well_id == SKL_DISP_PW_MISC_IO))
		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
				 "by DMC\n", power_well->name);
	else
		WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
			  power_well->name);

	I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
	I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}
static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->id) {
	case SKL_DISP_PW_1:
		if (intel_wait_for_register(dev_priv,
					    SKL_FUSE_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->id);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->id);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->id);
	is_enabled = tmp & state_mask;

	if (!enable && enable_requested)
		skl_power_well_pre_disable(dev_priv, power_well);

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
			     !I915_READ(HSW_PWR_WELL_BIOS),
			     "Invalid for power well status to be enabled, unless done by the BIOS, when request is to disable!\n");
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}

		if (IS_GEN9(dev_priv))
			gen9_sanitize_power_well_requests(dev_priv, power_well);
	}

	if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
		     1))
		DRM_ERROR("%s %s timeout\n",
			  power_well->name, enable ? "enable" : "disable");

	if (check_fuse_status) {
		if (power_well->id == SKL_DISP_PW_1) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->id == SKL_DISP_PW_2) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
			SKL_POWER_WELL_STATE(power_well->id);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->data);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->data);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
}

static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		bxt_dpio_cmn_power_well_enable(dev_priv, power_well);
	else
		bxt_dpio_cmn_power_well_disable(dev_priv, power_well);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->data);

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->data);
}
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u32 tmp = I915_READ(DBUF_CTL);

	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
	     "Unexpected DBuf power state (0x%08x)\n", tmp);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	WARN_ON(dev_priv->cdclk_freq !=
		dev_priv->display.get_display_clock_speed(dev_priv));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_BROXTON(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}

static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		gen9_dc_off_power_well_enable(dev_priv, power_well);
	else
		gen9_dc_off_power_well_disable(dev_priv, power_well);
}
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->id;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
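
/*
 * The COND define/use/#undef pattern above keeps the punit status read in
 * a single expression shared by the fast-path check and the wait_for()
 * poll. Open-coded equivalent (sketch; punit_pwrgt_state() is a
 * hypothetical helper used only for illustration):
 *
 *	if (punit_pwrgt_state(dev_priv, mask) != state) {
 *		... write PUNIT_REG_PWRGT_CTRL ...
 *		wait_for(punit_pwrgt_state(dev_priv, mask) == state, 100);
 *	}
 */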
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->id;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);
	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be initialized explicitly
	 * anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *       be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;

		power_well = &power_domains->power_wells[i];
		if (power_well->id == power_well_id)
			return power_well;
	}

	return NULL;
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
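
/*
 * Sketch of the semantics: BITS_SET(0x6, 0x2) is true (the requested bit is
 * set), while BITS_SET(0x6, 0x3) is false (bit 0 is missing from 0x6).
 */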
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}
#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
	       DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (i.e. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If the CH1 common lane is not active anymore
		 * (e.g. for the pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
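
/*
 * Editor's note, summarizing the expectations the assert above derives
 * from its arguments (mask = lanes being power gated via the override):
 *
 *	override  mask   expected powerdown bits
 *	off       any    DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN
 *	on        0xf    DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN
 *	on        !=0    DPIO_ANYDL_POWERDOWN
 *	on        0x0    none
 *
 * with the special CH1 "entire register reads as 0" case treated as the
 * channel being entirely powered down.
 */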
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->id;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->id;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
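
/*
 * Editor's note: the punit handshake above works on paired bitfields in
 * PUNIT_REG_DSPFREQ. The driver writes its request into the DP_SSC field
 * and the punit acknowledges it in the DP_SSS status field 16 bits up,
 * which is why chv_pipe_power_well_enabled() can sanity check the pair
 * with "ctrl << 16 != state".
 */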
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, BIT(domain), power_domains)
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);
}
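
/*
 * Illustrative usage sketch (editor's note, not part of the driver):
 * a caller that needs, say, the VGA hardware powered brackets the
 * register access with a get/put pair on the innermost domain:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);
 *	... poke VGA registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
 */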
/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if the domain
 * is already enabled, and in that case ensures that the domain and all its
 * parents stay powered up for as long as the reference is held. It returns
 * true if a reference was acquired, false otherwise.
 *
 * Any power domain reference obtained by this function (i.e. on a true return
 * value) must have a symmetric call to intel_display_power_put() to release
 * the reference again; when false is returned no reference was taken.
 */
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool is_enabled;

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled)
		intel_runtime_pm_put(dev_priv);

	return is_enabled;
}
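
/*
 * Illustrative usage sketch (editor's note, not part of the driver):
 * read-only paths that must not power anything up can use the
 * conditional variant and simply skip the work when the domain is off:
 *
 *	if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A)) {
 *		... read pipe A state ...
 *		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 *	}
 */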
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to release the reference for
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     intel_display_power_domain_str(domain));
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
		intel_power_well_put(dev_priv, power_well);

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}
#define HSW_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DSI) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_GMBUS) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DSI) |		\
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_GMBUS) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = gen9_dc_off_power_well_sync_hw,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = bxt_dpio_cmn_power_well_sync_hw,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = PUNIT_POWER_WELL_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.id = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->ops->is_enabled(dev_priv, power_well);

	return ret;
}
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = SKL_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
};
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		.data = DPIO_PHY1,
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		.data = DPIO_PHY0,
	},
};
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}
static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
				    int enable_dc)
{
	uint32_t mask;
	int requested_dc;
	int max_dc;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		max_dc = 2;
		mask = 0;
	} else if (IS_BROXTON(dev_priv)) {
		max_dc = 1;
		/*
		 * DC9 has a separate HW flow from the rest of the DC states,
		 * not depending on the DMC firmware. It's needed by system
		 * suspend/resume, so allow it unconditionally.
		 */
		mask = DC_STATE_EN_DC9;
	} else {
		max_dc = 0;
		mask = 0;
	}

	if (!i915.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 2) {
		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
			      enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	if (requested_dc > 1)
		mask |= DC_STATE_EN_UPTO_DC6;
	if (requested_dc > 0)
		mask |= DC_STATE_EN_UPTO_DC5;

	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);

	return mask;
}
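
/*
 * Editor's note, a worked example of the logic above: a SKL/KBL part with
 * the default enable_dc=-1 (and power well support not disabled) ends up
 * with requested_dc = max_dc = 2 and thus
 * mask = DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6, while BXT with
 * enable_dc=-1 gets mask = DC_STATE_EN_DC9 | DC_STATE_EN_UPTO_DC5.
 */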
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
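
/*
 * Editor's note: set_power_wells() must be handed the array itself, not a
 * pointer to it, since ARRAY_SIZE() is evaluated on the argument, e.g.:
 *
 *	set_power_wells(power_domains, hsw_power_wells);
 */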
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
						     i915.disable_power_well);
	dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
							    i915.enable_dc);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	struct device *kdev = &dev_priv->drm.pdev->dev;

	/*
	 * The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload.
	 * The following also reacquires the RPM reference the core passed
	 * to the driver during loading, which is dropped in
	 * intel_runtime_pm_enable(). We have to hand back the control of the
	 * device to the core with this reference held.
	 */
	intel_display_set_init_power(dev_priv, true);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/*
	 * Remove the refcount we took in intel_runtime_pm_enable() in case
	 * the platform doesn't support runtime PM.
	 */
	if (!HAS_RUNTIME_PM(dev_priv))
		pm_runtime_put(kdev);
}
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout!\n");
}
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/* disable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_disable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}

void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/* Disable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might already be active, skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		skl_display_core_init(dev_priv, resume);
	} else if (IS_BROXTON(dev_priv)) {
		bxt_display_core_init(dev_priv, resume);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	/*
	 * If the user asked to keep the power wells on (i.e. disabled power
	 * well support), take an extra INIT reference so they are never
	 * powered down at runtime.
	 */
	if (!i915.disable_power_well)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(dev_priv);
	power_domains->initializing = false;
}
/**
 * intel_power_domains_suspend - suspend power domain state
 * @dev_priv: i915 device instance
 *
 * This function prepares the hardware power domain state before entering
 * system suspend. It must be paired with intel_power_domains_init_hw().
 */
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells while we are system suspended.
	 */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		skl_display_core_uninit(dev_priv);
	else if (IS_BROXTON(dev_priv))
		bxt_display_core_uninit(dev_priv);
}
/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	pm_runtime_get_sync(kdev);

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);
}
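
/*
 * Illustrative usage sketch (editor's note, not part of the driver):
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... access hardware that needs the device awake ...
 *	intel_runtime_pm_put(dev_priv);
 */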
/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up. It returns false without
 * taking a reference otherwise.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	if (IS_ENABLED(CONFIG_PM)) {
		int ret = pm_runtime_get_if_in_use(kdev);

		/*
		 * In cases where runtime PM is disabled by the RPM core and
		 * we get an -EINVAL return value we are not supposed to call
		 * this function, since the power state is undefined. This
		 * applies atm to the late/early system suspend/resume
		 * handlers.
		 */
		WARN_ON_ONCE(ret < 0);
		if (ret <= 0)
			return false;
	}

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);

	return true;
}
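
/*
 * Illustrative usage sketch (editor's note, not part of the driver):
 * opportunistic work that should only run while the device is already
 * awake can be skipped entirely otherwise:
 *
 *	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
 *		... do the optional hardware access ...
 *		intel_runtime_pm_put(dev_priv);
 *	}
 */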
/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	pm_runtime_get_noresume(kdev);

	atomic_inc(&dev_priv->pm.wakeref_count);
}
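
/*
 * Illustrative usage sketch (editor's note, not part of the driver):
 * from a context known to hold the device awake, hand a reference off
 * to a work item that completes later:
 *
 *	intel_runtime_pm_get_noresume(dev_priv);
 *	schedule_work(&dev_priv->some_work);	// hypothetical work item
 *
 *	// in the work handler, once done:
 *	intel_runtime_pm_put(dev_priv);
 */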
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	atomic_dec(&dev_priv->pm.wakeref_count);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}
/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!HAS_RUNTIME_PM(dev_priv)) {
		pm_runtime_dont_use_autosuspend(kdev);
		pm_runtime_get_sync(kdev);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}