intel_runtime_pm.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323
  1. /*
  2. * Copyright © 2012-2014 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Eugeni Dodonov <eugeni.dodonov@intel.com>
  25. * Daniel Vetter <daniel.vetter@ffwll.ch>
  26. *
  27. */
  28. #include <linux/pm_runtime.h>
  29. #include <linux/vgaarb.h>
  30. #include "i915_drv.h"
  31. #include "intel_drv.h"
  32. /**
  33. * DOC: runtime pm
  34. *
  35. * The i915 driver supports dynamic enabling and disabling of entire hardware
  36. * blocks at runtime. This is especially important on the display side where
  37. * software is supposed to control many power gates manually on recent hardware,
  38. * since on the GT side a lot of the power management is done by the hardware.
  39. * But even there some manual control at the device level is required.
  40. *
  41. * Since i915 supports a diverse set of platforms with a unified codebase and
  42. * hardware engineers just love to shuffle functionality around between power
  43. * domains there's a sizeable amount of indirection required. This file provides
  44. * generic functions to the driver for grabbing and releasing references for
  45. * abstract power domains. It then maps those to the actual power wells
  46. * present for a given platform.
  47. */
/*
 * Iterate over the power wells of @power_domains in array order, visiting
 * only the wells whose domain bitmask intersects @domain_mask.
 */
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))
/*
 * Same as for_each_power_well(), but walks the power well array in
 * reverse order (used on the power-down path and for state checks).
 */
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			   \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);  \
	     i--)							   \
		if ((power_well)->domains & (domain_mask))
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	/* Both the driver's enable request and the HW state bit must be set. */
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
  70. /**
  71. * __intel_display_power_is_enabled - unlocked check for a power domain
  72. * @dev_priv: i915 device instance
  73. * @domain: power domain to check
  74. *
  75. * This is the unlocked version of intel_display_power_is_enabled() and should
  76. * only be used from error capture and recovery code where deadlocks are
  77. * possible.
  78. *
  79. * Returns:
  80. * True when the power domain is enabled, false otherwise.
  81. */
  82. bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
  83. enum intel_display_power_domain domain)
  84. {
  85. struct i915_power_domains *power_domains;
  86. struct i915_power_well *power_well;
  87. bool is_enabled;
  88. int i;
  89. if (dev_priv->pm.suspended)
  90. return false;
  91. power_domains = &dev_priv->power_domains;
  92. is_enabled = true;
  93. for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
  94. if (power_well->always_on)
  95. continue;
  96. if (!power_well->hw_enabled) {
  97. is_enabled = false;
  98. break;
  99. }
  100. }
  101. return is_enabled;
  102. }
  103. /**
  104. * intel_display_power_is_enabled - check for a power domain
  105. * @dev_priv: i915 device instance
  106. * @domain: power domain to check
  107. *
  108. * This function can be used to check the hw power domain state. It is mostly
  109. * used in hardware state readout functions. Everywhere else code should rely
  110. * upon explicit power domain reference counting to ensure that the hardware
  111. * block is powered up before accessing it.
  112. *
  113. * Callers must hold the relevant modesetting locks to ensure that concurrent
  114. * threads can't disable the power well while the caller tries to read a few
  115. * registers.
  116. *
  117. * Returns:
  118. * True when the power domain is enabled, false otherwise.
  119. */
  120. bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
  121. enum intel_display_power_domain domain)
  122. {
  123. struct i915_power_domains *power_domains;
  124. bool ret;
  125. power_domains = &dev_priv->power_domains;
  126. mutex_lock(&power_domains->lock);
  127. ret = __intel_display_power_is_enabled(dev_priv, domain);
  128. mutex_unlock(&power_domains->lock);
  129. return ret;
  130. }
  131. /**
  132. * intel_display_set_init_power - set the initial power domain state
  133. * @dev_priv: i915 device instance
  134. * @enable: whether to enable or disable the initial power domain state
  135. *
  136. * For simplicity our driver load/unload and system suspend/resume code assumes
  137. * that all power domains are always enabled. This functions controls the state
  138. * of this little hack. While the initial power domain state is enabled runtime
  139. * pm is effectively disabled.
  140. */
  141. void intel_display_set_init_power(struct drm_i915_private *dev_priv,
  142. bool enable)
  143. {
  144. if (dev_priv->power_domains.init_power_on == enable)
  145. return;
  146. if (enable)
  147. intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
  148. else
  149. intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
  150. dev_priv->power_domains.init_power_on = enable;
  151. }
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we make sure we touch the VGA MSR
	 * register, making sure vgacon can keep working normally without
	 * triggering interrupts and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	/* BDW/gen9+ also need their power-well interrupts re-initialized. */
	if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
		gen8_irq_power_well_post_enable(dev_priv);
}
/*
 * Set the HSW/BDW display power well state via the driver's request
 * register, waiting for the hardware to report the well as enabled when
 * powering up.
 */
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	/* Read back both the current state and our pending request. */
	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			/* Wait up to 20ms for the HW state bit to latch. */
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}
	} else {
		if (enable_requested) {
			/*
			 * Only drop our request; the well actually powers
			 * down once no requester is left.
			 */
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}
/* Sync the HW state of the well to the current software refcount. */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
/* i915_power_well_ops::enable hook for the HSW/BDW display well. */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}
/* i915_power_well_ops::disable hook for the HSW/BDW display well. */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}
/* No-op hook for always-on wells: there is no HW state to change. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
/* Always-on wells are, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}
  234. static void vlv_set_power_well(struct drm_i915_private *dev_priv,
  235. struct i915_power_well *power_well, bool enable)
  236. {
  237. enum punit_power_well power_well_id = power_well->data;
  238. u32 mask;
  239. u32 state;
  240. u32 ctrl;
  241. mask = PUNIT_PWRGT_MASK(power_well_id);
  242. state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
  243. PUNIT_PWRGT_PWR_GATE(power_well_id);
  244. mutex_lock(&dev_priv->rps.hw_lock);
  245. #define COND \
  246. ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
  247. if (COND)
  248. goto out;
  249. ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
  250. ctrl &= ~mask;
  251. ctrl |= state;
  252. vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
  253. if (wait_for(COND, 100))
  254. DRM_ERROR("timout setting power well state %08x (%08x)\n",
  255. state,
  256. vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
  257. #undef COND
  258. out:
  259. mutex_unlock(&dev_priv->rps.hw_lock);
  260. }
/* Sync the Punit-controlled well's HW state to the software refcount. */
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}
/* i915_power_well_ops::enable hook for plain VLV Punit wells. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
/* i915_power_well_ops::disable hook for plain VLV Punit wells. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
/*
 * Query the Punit status register to determine whether @power_well is
 * currently powered on, sanity-checking the state against the control
 * register.
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
/*
 * Power up the VLV DISP2D well, then re-enable display interrupts and
 * restore the HW/SW state that was lost while the well was off.
 */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	i915_redisable_vga_power_on(dev_priv->dev);
}
/*
 * Disable display interrupts before powering down the VLV DISP2D well,
 * then reset the power sequencer tracking since its state is lost.
 */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_set_power_well(dev_priv, power_well, false);

	vlv_power_sequencer_reset(dev_priv);
}
/*
 * Power up the VLV DPIO common lane well: enable the CRI clock first, then
 * turn the well on and de-assert the common lane reset.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 * b. The other bits such as sfr settings / modesel may all
	 * be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
/*
 * Power down the VLV DPIO common lane well. All PLLs must already be
 * disabled; common reset is asserted before the well is turned off.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
  369. static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
  370. struct i915_power_well *power_well)
  371. {
  372. enum dpio_phy phy;
  373. WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
  374. power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
  375. /*
  376. * Enable the CRI clock source so we can get at the
  377. * display and the reference clock for VGA
  378. * hotplug / manual detection.
  379. */
  380. if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
  381. phy = DPIO_PHY0;
  382. I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
  383. DPLL_REFA_CLK_ENABLE_VLV);
  384. I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
  385. DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
  386. } else {
  387. phy = DPIO_PHY1;
  388. I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
  389. DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
  390. }
  391. udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
  392. vlv_set_power_well(dev_priv, power_well, true);
  393. /* Poll for phypwrgood signal */
  394. if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
  395. DRM_ERROR("Display PHY %d is not power up\n", phy);
  396. I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
  397. PHY_COM_LANE_RESET_DEASSERT(phy));
  398. }
/*
 * Power down a CHV DPIO common lane well: assert the common lane reset
 * for the matching PHY, then turn the Punit well off. The PLLs feeding
 * the PHY must already be disabled.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
		   ~PHY_COM_LANE_RESET_DEASSERT(phy));

	vlv_set_power_well(dev_priv, power_well, false);
}
/*
 * Query the Punit DSPFREQ status bits to determine whether the per-pipe
 * power well is on, cross-checking against the control bits.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
  440. static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
  441. struct i915_power_well *power_well,
  442. bool enable)
  443. {
  444. enum pipe pipe = power_well->data;
  445. u32 state;
  446. u32 ctrl;
  447. state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
  448. mutex_lock(&dev_priv->rps.hw_lock);
  449. #define COND \
  450. ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
  451. if (COND)
  452. goto out;
  453. ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
  454. ctrl &= ~DP_SSC_MASK(pipe);
  455. ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
  456. vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
  457. if (wait_for(COND, 100))
  458. DRM_ERROR("timout setting power well state %08x (%08x)\n",
  459. state,
  460. vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
  461. #undef COND
  462. out:
  463. mutex_unlock(&dev_priv->rps.hw_lock);
  464. }
/* Sync the CHV pipe well's HW state to the current software refcount. */
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
/*
 * Power up a CHV per-pipe well. For pipe A (which carries the display
 * interrupt logic here) also re-enable display interrupts and restore
 * state lost while the well was off.
 */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	if (power_well->data == PIPE_A) {
		spin_lock_irq(&dev_priv->irq_lock);
		valleyview_enable_display_irqs(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		/*
		 * During driver initialization/resume we can avoid restoring the
		 * part of the HW/SW state that will be inited anyway explicitly.
		 */
		if (dev_priv->power_domains.initializing)
			return;

		intel_hpd_init(dev_priv);

		i915_redisable_vga_power_on(dev_priv->dev);
	}
}
/*
 * Power down a CHV per-pipe well. For pipe A, display interrupts are
 * disabled first and the power sequencer tracking is reset afterwards.
 */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	if (power_well->data == PIPE_A) {
		spin_lock_irq(&dev_priv->irq_lock);
		valleyview_disable_display_irqs(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	chv_set_pipe_power_well(dev_priv, power_well, false);

	if (power_well->data == PIPE_A)
		vlv_power_sequencer_reset(dev_priv);
}
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	/* Hold a runtime pm reference for as long as the domain is in use. */
	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	/* Enable every well serving this domain on its 0 -> 1 transition. */
	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
			power_well->hw_enabled = true;
		}
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	/*
	 * Walk the wells in reverse order; a well is only actually powered
	 * down when its last reference goes away and the disable_power_well
	 * module parameter permits it.
	 */
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->hw_enabled = false;
			power_well->ops->disable(dev_priv, power_well);
		}
	}

	mutex_unlock(&power_domains->lock);

	/* Drop the runtime pm reference taken in intel_display_power_get(). */
	intel_runtime_pm_put(dev_priv);
}
/* Bitmask covering every defined power domain. */
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

/* HSW: domains on the always-on well vs. the single display power well. */
#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

/* BDW: like HSW, but pipe A's panel fitter is also always-on. */
#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

/* VLV: display well plus per-port DPIO common/TX lane wells. */
#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

/* CHV: per-pipe wells plus DPIO common/TX lane wells. */
#define CHV_PIPE_A_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_A) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_B_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_B) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_C_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_C) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))
/* vtable for wells that are always on and need no HW programming. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* vtable for CHV per-pipe power wells (Punit DSPFREQ controlled). */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* vtable for CHV DPIO common lane wells. */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
/* Platforms without power wells expose a single always-on well. */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};
/* Ops for the HSW/BDW "display" power well. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
/* Haswell power wells, in enable order (disable order is the reverse). */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};
/* Broadwell power wells; same layout as HSW, different domain masks. */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};
/* Ops for the VLV DISP2D (display) power well. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
/* Ops for the VLV DPIO common lane well (PHY-level power). */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
/* Generic punit-controlled well ops, used for the DPIO TX lane wells. */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
/*
 * Valleyview power wells, in enable order (disable order is the reverse).
 *
 * Note that all four dpio-tx-* wells deliberately carry the union of every
 * TX lane domain (B and C, lanes 01 and 23), so grabbing any lane domain
 * powers up all TX lane wells together — presumably the lanes cannot be
 * power gated independently in practice; TODO confirm against the PHY docs.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		/* Last in the table, so it is disabled first. */
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
/*
 * Cherryview power wells, in enable order (disable order is the reverse).
 * Several wells are compiled out with #if 0 — presumably not yet validated
 * on this hardware; TODO confirm before enabling them.
 */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
#if 0
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
#endif
	{
		.name = "pipe-a",
		/*
		 * FIXME: pipe A power well seems to be the new disp2d well.
		 * At least all registers seem to be housed there. Figure
		 * out if this is a temporary situation in pre-production
		 * hardware or a permanent state of affairs.
		 */
		.domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
#if 0
	{
		.name = "pipe-b",
		.domains = CHV_PIPE_B_POWER_DOMAINS,
		.data = PIPE_B,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-c",
		.domains = CHV_PIPE_C_POWER_DOMAINS,
		.data = PIPE_C,
		.ops = &chv_pipe_power_well_ops,
	},
#endif
	{
		.name = "dpio-common-bc",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
#if 0
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-tx-d-01",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
	},
	{
		.name = "dpio-tx-d-23",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
	},
#endif
};
  880. static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
  881. enum punit_power_well power_well_id)
  882. {
  883. struct i915_power_domains *power_domains = &dev_priv->power_domains;
  884. struct i915_power_well *power_well;
  885. int i;
  886. for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
  887. if (power_well->data == power_well_id)
  888. return power_well;
  889. }
  890. return NULL;
  891. }
/*
 * Point @power_domains at a platform's power well table. Must be a macro so
 * that ARRAY_SIZE() is evaluated on the actual array argument (it would not
 * work on a decayed pointer parameter).
 */
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 *
 * Returns: 0 (reserved for reporting errors in the future).
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		/* CHV is checked before VLV: IS_VALLEYVIEW() also matches
		 * CHV (see the IS_VALLEYVIEW && !IS_CHERRYVIEW test in
		 * intel_power_domains_init_hw()). */
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		/* Platforms with no controllable display power wells. */
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
  924. static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
  925. {
  926. struct drm_device *dev = dev_priv->dev;
  927. struct device *device = &dev->pdev->dev;
  928. if (!HAS_RUNTIME_PM(dev))
  929. return;
  930. if (!intel_enable_rc6(dev))
  931. return;
  932. /* Make sure we're not suspended first. */
  933. pm_runtime_get_sync(device);
  934. pm_runtime_disable(device);
  935. }
/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	/* Drop out of runtime pm first so the device cannot suspend. */
	intel_runtime_pm_disable(dev_priv);

	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev_priv, true);
}
/*
 * Re-sync software power well state with the hardware after init/resume:
 * for each well, let the platform ops reconcile hw state and cache whether
 * the well is currently enabled.
 */
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}
/*
 * vlv_cmnlane_wa - force a display PHY common lane reset cycle on VLV
 *
 * If the PHY has not already been brought up by the BIOS/firmware (both
 * wells enabled with cmnreset deasserted), power-cycle the common lane
 * well to assert and deassert the PHY sideband reset. Leaves the common
 * lane well OFF; it is re-enabled later via the normal domain refs.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	/* VLV-only workaround (IS_VALLEYVIEW is also true on CHV). */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	/* Sync the cached power well state with the actual hw state. */
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}
/**
 * intel_aux_display_runtime_get - grab an auxilliary power domain reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a power domain reference for the auxiliary power domain
 * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
 * parents are powered up. Therefore users should only grab a reference to the
 * innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_aux_display_runtime_put() to release the reference again.
 */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	/* Currently just a device-level runtime pm reference. */
	intel_runtime_pm_get(dev_priv);
}
/**
 * intel_aux_display_runtime_put - release an auxilliary power domain reference
 * @dev_priv: i915 device instance
 *
 * This function drops the auxilliary power domain reference obtained by
 * intel_aux_display_runtime_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	/* Mirrors intel_aux_display_runtime_get(). */
	intel_runtime_pm_put(dev_priv);
}
  1038. /**
  1039. * intel_runtime_pm_get - grab a runtime pm reference
  1040. * @dev_priv: i915 device instance
  1041. *
  1042. * This function grabs a device-level runtime pm reference (mostly used for GEM
  1043. * code to ensure the GTT or GT is on) and ensures that it is powered up.
  1044. *
  1045. * Any runtime pm reference obtained by this function must have a symmetric
  1046. * call to intel_runtime_pm_put() to release the reference again.
  1047. */
  1048. void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
  1049. {
  1050. struct drm_device *dev = dev_priv->dev;
  1051. struct device *device = &dev->pdev->dev;
  1052. if (!HAS_RUNTIME_PM(dev))
  1053. return;
  1054. pm_runtime_get_sync(device);
  1055. WARN(dev_priv->pm.suspended, "Device still suspended.\n");
  1056. }
  1057. /**
  1058. * intel_runtime_pm_get_noresume - grab a runtime pm reference
  1059. * @dev_priv: i915 device instance
  1060. *
  1061. * This function grabs a device-level runtime pm reference (mostly used for GEM
  1062. * code to ensure the GTT or GT is on).
  1063. *
  1064. * It will _not_ power up the device but instead only check that it's powered
  1065. * on. Therefore it is only valid to call this functions from contexts where
  1066. * the device is known to be powered up and where trying to power it up would
  1067. * result in hilarity and deadlocks. That pretty much means only the system
  1068. * suspend/resume code where this is used to grab runtime pm references for
  1069. * delayed setup down in work items.
  1070. *
  1071. * Any runtime pm reference obtained by this function must have a symmetric
  1072. * call to intel_runtime_pm_put() to release the reference again.
  1073. */
  1074. void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
  1075. {
  1076. struct drm_device *dev = dev_priv->dev;
  1077. struct device *device = &dev->pdev->dev;
  1078. if (!HAS_RUNTIME_PM(dev))
  1079. return;
  1080. WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
  1081. pm_runtime_get_noresume(device);
  1082. }
  1083. /**
  1084. * intel_runtime_pm_put - release a runtime pm reference
  1085. * @dev_priv: i915 device instance
  1086. *
  1087. * This function drops the device-level runtime pm reference obtained by
  1088. * intel_runtime_pm_get() and might power down the corresponding
  1089. * hardware block right away if this is the last reference.
  1090. */
  1091. void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
  1092. {
  1093. struct drm_device *dev = dev_priv->dev;
  1094. struct device *device = &dev->pdev->dev;
  1095. if (!HAS_RUNTIME_PM(dev))
  1096. return;
  1097. pm_runtime_mark_last_busy(device);
  1098. pm_runtime_put_autosuspend(device);
  1099. }
/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	/* Tell the PM core the device is currently active. */
	pm_runtime_set_active(device);

	/*
	 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

	/* Configure autosuspend and drop the initial reference so the
	 * device can actually runtime suspend once it goes idle. */
	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	pm_runtime_put_autosuspend(device);
}