intel_runtime_pm.c
/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
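
/*
 * For illustration, the typical calling pattern looks roughly like the
 * sketch below (the register access in the middle is a hypothetical
 * placeholder; the balanced get/put pairing is the part that matters):
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);
 *
 *	... access registers backed by the VGA power well ...
 *
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
 */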

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			   \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]); \
	     i--)							   \
		if ((power_well)->domains & (domain_mask))
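
/*
 * Illustrative only: with these iterators, walking all wells that back a
 * given domain is a plain loop, e.g.
 *
 *	for_each_power_well(i, power_well, BIT(POWER_DOMAIN_VGA), power_domains)
 *		DRM_DEBUG_KMS("%s backs the VGA domain\n", power_well->name);
 */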

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
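
/*
 * A minimal state-readout sketch (hypothetical caller; PIPECONF() is assumed
 * to be the usual pipe configuration register macro): bail out instead of
 * grabbing a reference when the domain is powered down:
 *
 *	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_B))
 *		return false;
 *
 *	tmp = I915_READ(PIPECONF(PIPE_B));
 */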

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers (BIOS, driver, KVMR and debug)
 * that can request the power well to be enabled, and it will only be disabled
 * if none of the registers is requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}

	if (power_well->data == SKL_DISP_PW_1) {
		intel_prepare_ddi(dev);
		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
	}
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS)

#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_B_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_C_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_D_POWER_DOMAINS |		\
	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |		\
	BIT(POWER_DOMAIN_INIT))
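
/*
 * These masks feed the power_well->domains test in the iterators above. For
 * example BIT(POWER_DOMAIN_VGA) is part of both
 * SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS and, via inclusion,
 * SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS, so intel_display_power_get() on
 * POWER_DOMAIN_VGA enables power well 1 and then power well 2 on SKL;
 * parent/child relationships are encoded purely by mask inclusion.
 */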

static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->data) {
	case SKL_DISP_PW_1:
		if (wait_for((I915_READ(SKL_FUSE_STATUS) &
			      SKL_FUSE_PG0_DIST_STATUS), 1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->data);
	is_enabled = tmp & state_mask;

	if (enable) {
		if (!enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      state_mask), 1))
				DRM_ERROR("%s enable timeout\n",
					  power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}
	}

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				      SKL_FUSE_PG1_DIST_STATUS), 1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				      SKL_FUSE_PG2_DIST_STATUS), 1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
			SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be initialized explicitly anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	i915_redisable_vga_power_on(dev_priv->dev);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_set_power_well(dev_priv, power_well, false);

	vlv_power_sequencer_reset(dev_priv);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.	The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV);
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	} else {
		phy = DPIO_PHY1;
		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	}
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
		   PHY_COM_LANE_RESET_DEASSERT(phy));
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
		   ~PHY_COM_LANE_RESET_DEASSERT(phy));

	vlv_set_power_well(dev_priv, power_well, false);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	if (power_well->data == PIPE_A) {
		spin_lock_irq(&dev_priv->irq_lock);
		valleyview_enable_display_irqs(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		/*
		 * During driver initialization/resume we can avoid restoring
		 * the part of the HW/SW state that will be initialized
		 * explicitly anyway.
		 */
		if (dev_priv->power_domains.initializing)
			return;

		intel_hpd_init(dev_priv);

		i915_redisable_vga_power_on(dev_priv->dev);
	}
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	if (power_well->data == PIPE_A) {
		spin_lock_irq(&dev_priv->irq_lock);
		valleyview_disable_display_irqs(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	chv_set_pipe_power_well(dev_priv, power_well, false);

	if (power_well->data == PIPE_A)
		vlv_power_sequencer_reset(dev_priv);
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
			power_well->hw_enabled = true;
		}
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to release the reference for
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->hw_enabled = false;
			power_well->ops->disable(dev_priv, power_well);
		}
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_INIT))

#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))

#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_A_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_A) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_B_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_B) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_C_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_C) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};

static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
#if 0
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
#endif
	{
		.name = "pipe-a",
		/*
		 * FIXME: pipe A power well seems to be the new disp2d well.
		 * At least all registers seem to be housed there. Figure
		 * out if this is a temporary situation in pre-production
		 * hardware or a permanent state of affairs.
		 */
		.domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
#if 0
	{
		.name = "pipe-b",
		.domains = CHV_PIPE_B_POWER_DOMAINS,
		.data = PIPE_B,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-c",
		.domains = CHV_PIPE_C_POWER_DOMAINS,
		.data = PIPE_C,
		.ops = &chv_pipe_power_well_ops,
	},
#endif
	{
		.name = "dpio-common-bc",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
#if 0
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-tx-d-01",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
	},
	{
		.name = "dpio-tx-d-23",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
	},
#endif
};

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 enum punit_power_well power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}
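
/*
 * Illustrative use (see vlv_cmnlane_wa() further below): fetching a specific
 * well by its platform-specific id instead of by domain mask:
 *
 *	struct i915_power_well *cmn =
 *		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
 */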

static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};

#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv->dev)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
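
/*
 * Wiring up a new platform is then just a new table plus a branch above;
 * purely as a hypothetical sketch (IS_NEWPLAT() and newplat_power_wells are
 * made-up names used only for illustration):
 *
 *	} else if (IS_NEWPLAT(dev_priv->dev)) {
 *		set_power_wells(power_domains, newplat_power_wells);
 *	}
 */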

static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
}

/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(dev_priv);

	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev_priv, true);
}

static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}

/**
 * intel_aux_display_runtime_get - grab an auxiliary power domain reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a power domain reference for the auxiliary power domain
 * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
 * parents are powered up. Therefore users should only grab a reference to the
 * innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_aux_display_runtime_put() to release the reference again.
 */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

/**
 * intel_aux_display_runtime_put - release an auxiliary power domain reference
 * @dev_priv: i915 device instance
 *
 * This function drops the auxiliary power domain reference obtained by
 * intel_aux_display_runtime_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_get_sync(device);
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
	pm_runtime_get_noresume(device);
}

/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
}
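
/*
 * The device-level API mirrors the display power domain API above; a
 * hypothetical GEM-side caller would bracket its hardware access the same
 * way the display code does:
 *
 *	intel_runtime_pm_get(dev_priv);
 *
 *	... access the GTT or other GT hardware ...
 *
 *	intel_runtime_pm_put(dev_priv);
 */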

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_set_active(device);

	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	pm_runtime_put_autosuspend(device);
}
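
/*
 * For reference, a rough sketch of how the entry points in this file are
 * expected to be ordered over the driver's lifetime (the actual call sites
 * live in the core driver code, not in this file):
 *
 *	intel_power_domains_init(dev_priv);	- load: set up the well tables
 *	intel_power_domains_init_hw(dev_priv);	- load: sync SW state with HW
 *	...
 *	intel_runtime_pm_enable(dev_priv);	- end of load: allow autosuspend
 *	...
 *	intel_power_domains_fini(dev_priv);	- unload: disable runtime pm,
 *						  keep the device powered
 */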