intel_runtime_pm.c 51 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715
  1. /*
  2. * Copyright © 2012-2014 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Eugeni Dodonov <eugeni.dodonov@intel.com>
  25. * Daniel Vetter <daniel.vetter@ffwll.ch>
  26. *
  27. */
  28. #include <linux/pm_runtime.h>
  29. #include <linux/vgaarb.h>
  30. #include "i915_drv.h"
  31. #include "intel_drv.h"
  32. /**
  33. * DOC: runtime pm
  34. *
  35. * The i915 driver supports dynamic enabling and disabling of entire hardware
  36. * blocks at runtime. This is especially important on the display side where
  37. * software is supposed to control many power gates manually on recent hardware,
  38. * since on the GT side a lot of the power management is done by the hardware.
  39. * But even there some manual control at the device level is required.
  40. *
  41. * Since i915 supports a diverse set of platforms with a unified codebase and
  42. * hardware engineers just love to shuffle functionality around between power
  43. * domains there's a sizeable amount of indirection required. This file provides
  44. * generic functions to the driver for grabbing and releasing references for
  45. * abstract power domains. It then maps those to the actual power wells
  46. * present for a given platform.
  47. */
/*
 * Iterate over the power wells of @power_domains in ascending order, visiting
 * only wells whose domain bitmask intersects @domain_mask.
 */
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))
/*
 * Same as for_each_power_well(), but walks the wells in descending order
 * (top-level wells first), as required when disabling.
 */
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains)	\
	for (i = (power_domains)->power_well_count - 1;			\
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							\
		if ((power_well)->domains & (domain_mask))
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	/* Both the driver request bit and the HW state bit must be set. */
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
  70. /**
  71. * __intel_display_power_is_enabled - unlocked check for a power domain
  72. * @dev_priv: i915 device instance
  73. * @domain: power domain to check
  74. *
  75. * This is the unlocked version of intel_display_power_is_enabled() and should
  76. * only be used from error capture and recovery code where deadlocks are
  77. * possible.
  78. *
  79. * Returns:
  80. * True when the power domain is enabled, false otherwise.
  81. */
  82. bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
  83. enum intel_display_power_domain domain)
  84. {
  85. struct i915_power_domains *power_domains;
  86. struct i915_power_well *power_well;
  87. bool is_enabled;
  88. int i;
  89. if (dev_priv->pm.suspended)
  90. return false;
  91. power_domains = &dev_priv->power_domains;
  92. is_enabled = true;
  93. for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
  94. if (power_well->always_on)
  95. continue;
  96. if (!power_well->hw_enabled) {
  97. is_enabled = false;
  98. break;
  99. }
  100. }
  101. return is_enabled;
  102. }
  103. /**
  104. * intel_display_power_is_enabled - check for a power domain
  105. * @dev_priv: i915 device instance
  106. * @domain: power domain to check
  107. *
  108. * This function can be used to check the hw power domain state. It is mostly
  109. * used in hardware state readout functions. Everywhere else code should rely
  110. * upon explicit power domain reference counting to ensure that the hardware
  111. * block is powered up before accessing it.
  112. *
  113. * Callers must hold the relevant modesetting locks to ensure that concurrent
  114. * threads can't disable the power well while the caller tries to read a few
  115. * registers.
  116. *
  117. * Returns:
  118. * True when the power domain is enabled, false otherwise.
  119. */
  120. bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
  121. enum intel_display_power_domain domain)
  122. {
  123. struct i915_power_domains *power_domains;
  124. bool ret;
  125. power_domains = &dev_priv->power_domains;
  126. mutex_lock(&power_domains->lock);
  127. ret = __intel_display_power_is_enabled(dev_priv, domain);
  128. mutex_unlock(&power_domains->lock);
  129. return ret;
  130. }
  131. /**
  132. * intel_display_set_init_power - set the initial power domain state
  133. * @dev_priv: i915 device instance
  134. * @enable: whether to enable or disable the initial power domain state
  135. *
  136. * For simplicity our driver load/unload and system suspend/resume code assumes
  137. * that all power domains are always enabled. This functions controls the state
  138. * of this little hack. While the initial power domain state is enabled runtime
  139. * pm is effectively disabled.
  140. */
  141. void intel_display_set_init_power(struct drm_i915_private *dev_priv,
  142. bool enable)
  143. {
  144. if (dev_priv->power_domains.init_power_on == enable)
  145. return;
  146. if (enable)
  147. intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
  148. else
  149. intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
  150. dev_priv->power_domains.init_power_on = enable;
  151. }
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	/* Pipes B/C live in the well; re-init their interrupt state. */
	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}
static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

		/* Pipes B/C are fed by power well 2. */
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}

	/* Power well 1 feeds pipe A and the DDI buffers. */
	if (power_well->data == SKL_DISP_PW_1) {
		intel_prepare_ddi(dev);
		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
	}
}
/*
 * Program the HSW display power well on or off via the driver request
 * register and, when enabling, wait for the HW state bit to stick.
 */
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			/* Give the HW up to 20ms to power the well up. */
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}
	} else {
		if (enable_requested) {
			/*
			 * Only drop our request; the well turns off once no
			 * requester (BIOS/KVMR/debug) is asking for it.
			 */
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}
/* Power domains fed by SKL power well 2: pipes B/C and the outer DDI/AUX IO. */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
/* Power well 1 is a superset of well 2, adding pipe A, eDP and the PLLs. */
#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
/* Per-DDI IO power wells. */
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS)
/* Any domain not claimed by a dedicated well above is always on. */
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_B_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_C_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_D_POWER_DOMAINS |		\
	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |		\
	BIT(POWER_DOMAIN_INIT))
/* BXT equivalents — no DDI D / AUX D on Broxton. */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
	BIT(POWER_DOMAIN_INIT))
/* Sanity-check the preconditions for entering DC9 on Broxton. */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be enabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled to enable DC9.\n");
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
/* Sanity-check the preconditions for leaving DC9. */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be disabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
  352. void bxt_enable_dc9(struct drm_i915_private *dev_priv)
  353. {
  354. uint32_t val;
  355. assert_can_enable_dc9(dev_priv);
  356. DRM_DEBUG_KMS("Enabling DC9\n");
  357. val = I915_READ(DC_STATE_EN);
  358. val |= DC_STATE_EN_DC9;
  359. I915_WRITE(DC_STATE_EN, val);
  360. POSTING_READ(DC_STATE_EN);
  361. }
  362. void bxt_disable_dc9(struct drm_i915_private *dev_priv)
  363. {
  364. uint32_t val;
  365. assert_can_disable_dc9(dev_priv);
  366. DRM_DEBUG_KMS("Disabling DC9\n");
  367. val = I915_READ(DC_STATE_EN);
  368. val &= ~DC_STATE_EN_DC9;
  369. I915_WRITE(DC_STATE_EN, val);
  370. POSTING_READ(DC_STATE_EN);
  371. }
/*
 * Program a SKL power well on or off. Power wells depend on the fuse
 * distribution status (PG0/PG1), so that is checked before requesting a
 * state change and, for PW1/PW2, re-checked after enabling.
 */
static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	/* Verify the parent power-gate fuse distribution is done. */
	switch (power_well->data) {
	case SKL_DISP_PW_1:
		if (wait_for((I915_READ(SKL_FUSE_STATUS) &
			SKL_FUSE_PG0_DIST_STATUS), 1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		/* No fuse dependency for the IO wells. */
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->data);
	is_enabled = tmp & state_mask;

	if (enable) {
		if (!enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				state_mask), 1))
				DRM_ERROR("%s enable timeout\n",
					  power_well->name);
			/* PW1/PW2 also need their fuse status confirmed. */
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}
	}

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG1_DIST_STATUS), 1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG2_DIST_STATUS), 1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	/* Restore VGA/irq state only on an actual off->on transition. */
	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}
/* Sync the HW power well state with the current SW refcount at init. */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
/* i915_power_well_ops enable hook for HSW-style wells. */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}
/* i915_power_well_ops disable hook for HSW-style wells. */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}
/* A SKL well is enabled only when both its request and state bits are set. */
static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
		SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}
/* Sync the HW state of a SKL well with the SW refcount at init. */
static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
/* i915_power_well_ops enable hook for SKL-style wells. */
static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}
/* i915_power_well_ops disable hook for SKL-style wells. */
static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}
/* Always-on wells have no HW state to program; all hooks are no-ops. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
/* An always-on well is, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}
  495. static void vlv_set_power_well(struct drm_i915_private *dev_priv,
  496. struct i915_power_well *power_well, bool enable)
  497. {
  498. enum punit_power_well power_well_id = power_well->data;
  499. u32 mask;
  500. u32 state;
  501. u32 ctrl;
  502. mask = PUNIT_PWRGT_MASK(power_well_id);
  503. state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
  504. PUNIT_PWRGT_PWR_GATE(power_well_id);
  505. mutex_lock(&dev_priv->rps.hw_lock);
  506. #define COND \
  507. ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
  508. if (COND)
  509. goto out;
  510. ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
  511. ctrl &= ~mask;
  512. ctrl |= state;
  513. vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
  514. if (wait_for(COND, 100))
  515. DRM_ERROR("timout setting power well state %08x (%08x)\n",
  516. state,
  517. vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
  518. #undef COND
  519. out:
  520. mutex_unlock(&dev_priv->rps.hw_lock);
  521. }
/* Sync the HW state of a VLV well with the SW refcount at init. */
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}
/* i915_power_well_ops enable hook for plain VLV wells. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
/* i915_power_well_ops disable hook for plain VLV wells. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
/* Read back the Punit status to decide whether a VLV well is powered on. */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
/*
 * Enable the VLV display (DISP2D) power well and restore the SW state that
 * lives inside it: display irqs, hotplug detection and the VGA plane.
 */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	i915_redisable_vga_power_on(dev_priv->dev);
}
/*
 * Disable the VLV display power well: irqs must be masked before the well
 * goes down, and the power sequencer state becomes invalid afterwards.
 */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_set_power_well(dev_priv, power_well, false);

	vlv_power_sequencer_reset(dev_priv);
}
/*
 * Power up the VLV DPIO common lane well. The ordering here is mandated by
 * the DPIO bring-up sequence: CRI clock on, delay, well on, then de-assert
 * common reset.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 * b. The other bits such as sfr settings / modesel may all
	 * be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
/*
 * Power down the VLV DPIO common lane well. All PLLs must already be off;
 * common reset is asserted before the well is gated.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
/*
 * Power up one of the two CHV DPIO common lane wells (PHY0 = ports B/C,
 * PHY1 = port D): CRI clock on, delay, well on, wait for phypwrgood, then
 * de-assert the per-PHY common lane reset.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV);
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	} else {
		phy = DPIO_PHY1;
		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	}
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
		   PHY_COM_LANE_RESET_DEASSERT(phy));
}
/*
 * Power down a CHV DPIO common lane well: verify the PLLs feeding that PHY
 * are off, re-assert the common lane reset, then gate the well.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		/* PHY0 serves pipes A and B. */
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		/* PHY1 serves pipe C. */
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
		   ~PHY_COM_LANE_RESET_DEASSERT(phy));

	vlv_set_power_well(dev_priv, power_well, false);
}
/*
 * Read back whether a CHV per-pipe power well is currently powered on,
 * by querying the Punit DSPFREQ status (SSS) bits for that pipe.
 * Also sanity-checks that the control (SSC) bits agree with the status.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	/* SSC (control) bits sit 16 below SSS (status); they should match. */
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
  701. static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
  702. struct i915_power_well *power_well,
  703. bool enable)
  704. {
  705. enum pipe pipe = power_well->data;
  706. u32 state;
  707. u32 ctrl;
  708. state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
  709. mutex_lock(&dev_priv->rps.hw_lock);
  710. #define COND \
  711. ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
  712. if (COND)
  713. goto out;
  714. ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
  715. ctrl &= ~DP_SSC_MASK(pipe);
  716. ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
  717. vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
  718. if (wait_for(COND, 100))
  719. DRM_ERROR("timout setting power well state %08x (%08x)\n",
  720. state,
  721. vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
  722. #undef COND
  723. out:
  724. mutex_unlock(&dev_priv->rps.hw_lock);
  725. }
/*
 * Sync the hardware state of a CHV pipe power well with its software
 * refcount: power it on iff there is at least one reference.
 */
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
/*
 * Enable a CHV pipe power well. Pipe A doubles as the display well here
 * (see the chv_power_wells[] FIXME), so enabling it also re-enables
 * display interrupts and - outside of init - re-runs hotplug init and
 * VGA redisable.
 */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	if (power_well->data == PIPE_A) {
		spin_lock_irq(&dev_priv->irq_lock);
		valleyview_enable_display_irqs(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		/*
		 * During driver initialization/resume we can avoid restoring the
		 * part of the HW/SW state that will be inited anyway explicitly.
		 */
		if (dev_priv->power_domains.initializing)
			return;

		intel_hpd_init(dev_priv);

		i915_redisable_vga_power_on(dev_priv->dev);
	}
}
/*
 * Disable a CHV pipe power well. For pipe A (the de-facto display well)
 * display interrupts are masked first, and the panel power sequencer
 * state is reset only after the well is actually gated.
 */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	if (power_well->data == PIPE_A) {
		spin_lock_irq(&dev_priv->irq_lock);
		valleyview_disable_display_irqs(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	chv_set_pipe_power_well(dev_priv, power_well, false);

	if (power_well->data == PIPE_A)
		vlv_power_sequencer_reset(dev_priv);
}
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	/* Device-level runtime PM ref is taken before the domain lock. */
	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	/* Enable each well feeding this domain on its 0 -> 1 transition. */
	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
			power_well->hw_enabled = true;
		}
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	/*
	 * Wells are walked in reverse so they power down in the opposite
	 * order from which they were enabled. Actual disabling is gated on
	 * the i915.disable_power_well module parameter.
	 */
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->hw_enabled = false;
			power_well->ops->disable(dev_priv, power_well);
		}
	}

	mutex_unlock(&power_domains->lock);

	/* Drop the runtime PM ref taken in intel_display_power_get(). */
	intel_runtime_pm_put(dev_priv);
}
/* Bitmask covering every defined power domain. */
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

/* HSW: domains that are always powered (pipe A, eDP, DDIs, CRT, AUX, ...). */
#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_INIT))
/* HSW "display" well: everything that is not always-on. */
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

/* BDW additionally keeps the pipe A panel fitter always on. */
#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

/* VLV: only INIT is always on; the display well covers everything. */
#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

/* VLV DPIO common lane well feeds ports B/C, CRT and their AUX channels. */
#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

/* Per-lane-pair TX well domains for ports B and C. */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

/* CHV per-pipe wells. */
#define CHV_PIPE_A_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_A) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_B_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_B) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_C_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_C) |	\
	BIT(POWER_DOMAIN_INIT))

/* CHV common lane wells: BC (PHY0, no CRT unlike VLV) and D (PHY1). */
#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

/* CHV port D TX lane-pair wells. */
#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))
/* Ops for wells that cannot be turned off: all callbacks are no-ops. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* CHV per-pipe wells, controlled via Punit DSPFREQ SSS/SSC bits. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* CHV DPIO common lane wells; enable/disable also manage PHY resets. */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Pre-HSW platforms: a single always-on well covering every domain. */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};
/* HSW/BDW single display well. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* SKL/BXT wells (PW1/PW2, MISC IO, DDIs). */
static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

/* VLV display (DISP2D) well; enable/disable also handle irqs and PPS. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* VLV DPIO common lane well; enable/disable also manage cmnreset. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Plain Punit-controlled wells (DPIO TX lane wells). */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
/*
 * VLV power well list. Note every TX lane well lists the union of all
 * B/C lane domains, so the four TX wells are toggled together.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	/* Common lane well comes last so it powers up first / down last. */
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
/*
 * CHV power well list. Several wells are compiled out with #if 0;
 * pipe A currently stands in for the whole display well.
 */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
#if 0
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
#endif
	{
		.name = "pipe-a",
		/*
		 * FIXME: pipe A power well seems to be the new disp2d well.
		 * At least all registers seem to be housed there. Figure
		 * out if this is a temporary situation in pre-production
		 * hardware or a permanent state of affairs.
		 */
		.domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
#if 0
	{
		.name = "pipe-b",
		.domains = CHV_PIPE_B_POWER_DOMAINS,
		.data = PIPE_B,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-c",
		.domains = CHV_PIPE_C_POWER_DOMAINS,
		.data = PIPE_C,
		.ops = &chv_pipe_power_well_ops,
	},
#endif
	{
		.name = "dpio-common-bc",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
#if 0
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-tx-d-01",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
	},
	{
		.name = "dpio-tx-d-23",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
	},
#endif
};
/*
 * Find the power well whose ->data matches @power_well_id, or NULL if the
 * current platform's well list has no such entry.
 */
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 enum punit_power_well power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	/* POWER_DOMAIN_MASK matches every well, so this walks them all. */
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}
/* SKL power well list: PW1/PW2, MISC IO and the per-DDI wells. */
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};
/* BXT power well list: reuses the SKL well ops for PW1/PW2. */
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	}
};
/* Install a platform's power well table (array + its element count). */
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 *
	 * Note the branch ordering is significant: CHV must be tested
	 * before VLV (IS_VALLEYVIEW() also matches CHV - see the
	 * IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) check in
	 * intel_power_domains_init_hw()).
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv->dev)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv->dev)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
/*
 * Undo intel_runtime_pm_enable(): take a wake ref so the device cannot
 * suspend, then disable runtime PM. Skipped if runtime PM was never
 * enabled (no HW support, or RC6 disabled - mirroring the enable path).
 */
static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
}
/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(dev_priv);

	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev_priv, true);
}
/*
 * Resync every power well's hardware state with its software refcount
 * (via ->sync_hw) and cache the resulting enabled state. Used during
 * init/resume when the HW state may not match our bookkeeping.
 */
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}
/*
 * VLV common lane workaround: force a PHY side reset by power-cycling the
 * common lane well, unless the display is already fully up (in which case
 * toggling it would disturb a running configuration).
 *
 * Called with power_domains->lock held (see intel_power_domains_init_hw()).
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	/* VLV-only (not CHV) PHY side-reset workaround. */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}
/**
 * intel_aux_display_runtime_get - grab an auxiliary power domain reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a power domain reference for the auxiliary power domain
 * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
 * parents are powered up. Therefore users should only grab a reference to the
 * innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_aux_display_runtime_put() to release the reference again.
 */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	/* Currently just forwards to the device-level runtime PM ref. */
	intel_runtime_pm_get(dev_priv);
}
/**
 * intel_aux_display_runtime_put - release an auxiliary power domain reference
 * @dev_priv: i915 device instance
 *
 * This function drops the auxiliary power domain reference obtained by
 * intel_aux_display_runtime_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	/* Mirror of intel_aux_display_runtime_get(). */
	intel_runtime_pm_put(dev_priv);
}
/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_get_sync(device);
	/* get_sync should have resumed the device by the time we get here. */
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}
/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on.  Therefore it is only valid to call this functions from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	/* Caller guarantees the device is awake; warn if that's violated. */
	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
	pm_runtime_get_noresume(device);
}
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	/* Defer suspend via autosuspend rather than dropping synchronously. */
	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
}
/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_set_active(device);

	/*
	 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	/* Drop the initial ref; the device may now autosuspend when idle. */
	pm_runtime_put_autosuspend(device);
}