intel_psr.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481
  1. /*
  2. * Copyright © 2014 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21. * DEALINGS IN THE SOFTWARE.
  22. */
  23. /**
  24. * DOC: Panel Self Refresh (PSR/SRD)
  25. *
  26. * Since Haswell Display controller supports Panel Self-Refresh on display
* panels which have a remote frame buffer (RFB) implemented according to PSR
  28. * spec in eDP1.3. PSR feature allows the display to go to lower standby states
  29. * when system is idle but display is on as it eliminates display refresh
  30. * request to DDR memory completely as long as the frame buffer for that
  31. * display is unchanged.
  32. *
  33. * Panel Self Refresh must be supported by both Hardware (source) and
  34. * Panel (sink).
  35. *
  36. * PSR saves power by caching the framebuffer in the panel RFB, which allows us
  37. * to power down the link and memory controller. For DSI panels the same idea
  38. * is called "manual mode".
  39. *
  40. * The implementation uses the hardware-based PSR support which automatically
  41. * enters/exits self-refresh mode. The hardware takes care of sending the
  42. * required DP aux message and could even retrain the link (that part isn't
  43. * enabled yet though). The hardware also keeps track of any frontbuffer
  44. * changes to know when to exit self-refresh mode again. Unfortunately that
  45. * part doesn't work too well, hence why the i915 PSR support uses the
  46. * software frontbuffer tracking to make sure it doesn't miss a screen
  47. * update. For this integration intel_psr_invalidate() and intel_psr_flush()
  48. * get called by the frontbuffer tracking code. Note that because of locking
  49. * issues the self-refresh re-enable code is done from a work queue, which
* must be correctly synchronized/cancelled when shutting down the pipe.
  51. */
  52. #include <drm/drmP.h>
  53. #include "intel_drv.h"
  54. #include "i915_drv.h"
  55. static bool is_edp_psr(struct intel_dp *intel_dp)
  56. {
  57. return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
  58. }
  59. bool intel_psr_is_enabled(struct drm_device *dev)
  60. {
  61. struct drm_i915_private *dev_priv = dev->dev_private;
  62. if (!HAS_PSR(dev))
  63. return false;
  64. return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
  65. }
/*
 * Write @vsc_psr into the transcoder's VSC DIP (Data Island Packet) data
 * buffer and enable its transmission.  Follows the bspec-mandated
 * disable -> write data -> enable sequence.
 */
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
				struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;	/* packet is copied dword-wise */
	unsigned int i;

	/* As per BSPec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before program video DIP data buffer
	   registers for DIP being updated. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	/* Copy the packet payload, zero-filling the rest of the data buffer. */
	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
		if (i < sizeof(struct edp_vsc_psr))
			I915_WRITE(data_reg + i, *data++);
		else
			I915_WRITE(data_reg + i, 0);
	}

	/* Data is in place; re-enable the DIP with VSC selected. */
	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}
/*
 * Build the VSC SDP header used for PSR and hand it to the DIP writer.
 * The memset matters: the whole packet, including reserved/padding bytes,
 * is copied to hardware dword-wise by intel_psr_write_vsc().
 */
static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
{
	struct edp_vsc_psr psr_vsc;

	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;	/* secondary-data packet ID */
	psr_vsc.sdp_header.HB1 = 0x7;	/* packet type: VSC */
	psr_vsc.sdp_header.HB2 = 0x2;	/* revision (per Table 3.10) */
	psr_vsc.sdp_header.HB3 = 0x8;	/* number of valid payload bytes */
	intel_psr_write_vsc(intel_dp, &psr_vsc);
}
/*
 * Enable PSR in the sink's DPCD configuration and pre-load the source's
 * PSR AUX channel with the message the hardware sends on its own when
 * waking the panel out of self-refresh.
 */
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider;
	int precharge = 0x3;
	bool only_standby = false;
	/*
	 * Pre-packed native AUX write of DP_SET_POWER = D0, so the hardware
	 * can power the sink back up on PSR exit without software involvement.
	 */
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,	/* AUX header encodes (length - 1) */
		[4] = DP_SET_POWER_D0,
	};
	int i;

	/* The PSR AUX data registers hold at most 20 message bytes. */
	BUILD_BUG_ON(sizeof(aux_msg) > 20);

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* NOTE(review): presumably BDW ports other than DDI A only support
	 * link standby, not full link shutdown — confirm against bspec. */
	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
		only_standby = true;

	/* Enable PSR in sink */
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
	else
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);

	/* Setup AUX registers: pack the message into the 32-bit data regs. */
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	I915_WRITE(EDP_PSR_AUX_CTL(dev),
		   DP_AUX_CH_CTL_TIME_OUT_400us |
		   (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
/*
 * Program the source-side PSR control register and set the enable bit.
 * Picks link-standby mode (with fast TP1/TP2/TP3 exit timings) when the
 * sink doesn't need retraining on exit or when only standby is possible;
 * otherwise the link is fully disabled during PSR.
 */
static void intel_psr_enable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t max_sleep_time = 0x1f;
	uint32_t idle_frames = 1;	/* frames of no update before PSR entry */
	uint32_t val = 0x0;
	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
	bool only_standby = false;

	/* NOTE(review): mirrors the check in intel_psr_enable_sink() —
	 * BDW ports other than DDI A are limited to link standby. */
	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
		only_standby = true;

	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
		val |= EDP_PSR_LINK_STANDBY;
		val |= EDP_PSR_TP2_TP3_TIME_0us;
		val |= EDP_PSR_TP1_TIME_0us;
		val |= EDP_PSR_SKIP_AUX_EXIT;
		val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
	} else
		val |= EDP_PSR_LINK_DISABLE;

	/* Broadwell doesn't take the link-entry-time field. */
	I915_WRITE(EDP_PSR_CTL(dev), val |
		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
		   EDP_PSR_ENABLE);
}
  165. static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
  166. {
  167. struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  168. struct drm_device *dev = dig_port->base.base.dev;
  169. struct drm_i915_private *dev_priv = dev->dev_private;
  170. struct drm_crtc *crtc = dig_port->base.base.crtc;
  171. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  172. lockdep_assert_held(&dev_priv->psr.lock);
  173. WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
  174. WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
  175. dev_priv->psr.source_ok = false;
  176. if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
  177. DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
  178. return false;
  179. }
  180. if (!i915.enable_psr) {
  181. DRM_DEBUG_KMS("PSR disable by flag\n");
  182. return false;
  183. }
  184. /* Below limitations aren't valid for Broadwell */
  185. if (IS_BROADWELL(dev))
  186. goto out;
  187. if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
  188. S3D_ENABLE) {
  189. DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
  190. return false;
  191. }
  192. if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
  193. DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
  194. return false;
  195. }
  196. out:
  197. dev_priv->psr.source_ok = true;
  198. return true;
  199. }
  200. static void intel_psr_do_enable(struct intel_dp *intel_dp)
  201. {
  202. struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
  203. struct drm_device *dev = intel_dig_port->base.base.dev;
  204. struct drm_i915_private *dev_priv = dev->dev_private;
  205. WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
  206. WARN_ON(dev_priv->psr.active);
  207. lockdep_assert_held(&dev_priv->psr.lock);
  208. /* Enable/Re-enable PSR on the host */
  209. intel_psr_enable_source(intel_dp);
  210. dev_priv->psr.active = true;
  211. }
/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 *
 * This function can only be called after the pipe is fully trained and
 * enabled.  It prepares the VSC packet and enables PSR in the sink;
 * the source side is armed later from the delayed work / flush path.
 */
void intel_psr_enable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_PSR(dev)) {
		DRM_DEBUG_KMS("PSR not supported on this platform\n");
		return;
	}

	if (!is_edp_psr(intel_dp)) {
		DRM_DEBUG_KMS("PSR not supported by this panel\n");
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	if (!intel_psr_match_conditions(intel_dp))
		goto unlock;

	/* Start with a clean slate of frontbuffer tracking state. */
	dev_priv->psr.busy_frontbuffer_bits = 0;

	intel_psr_setup_vsc(intel_dp);

	/* Avoid continuous PSR exit by masking memup and hpd */
	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

	/* Enable PSR on the panel */
	intel_psr_enable_sink(intel_dp);

	/* Record which encoder owns PSR; intel_psr_work() arms the source. */
	dev_priv->psr.enabled = intel_dp;
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called before disabling pipe.  It clears the
 * source enable bit, waits for the hardware to report idle, and tears
 * down the software PSR state.
 */
void intel_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	if (dev_priv->psr.active) {
		I915_WRITE(EDP_PSR_CTL(dev),
			   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);

		/* Wait till PSR is idle */
		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		/* If we never armed the source, hardware must not be enabled. */
		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
	}

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	/* Cancel outside the lock: intel_psr_work() takes psr.lock itself. */
	cancel_delayed_work_sync(&dev_priv->psr.work);
}
  280. static void intel_psr_work(struct work_struct *work)
  281. {
  282. struct drm_i915_private *dev_priv =
  283. container_of(work, typeof(*dev_priv), psr.work.work);
  284. struct intel_dp *intel_dp = dev_priv->psr.enabled;
  285. /* We have to make sure PSR is ready for re-enable
  286. * otherwise it keeps disabled until next full enable/disable cycle.
  287. * PSR might take some time to get fully disabled
  288. * and be ready for re-enable.
  289. */
  290. if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
  291. EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
  292. DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
  293. return;
  294. }
  295. mutex_lock(&dev_priv->psr.lock);
  296. intel_dp = dev_priv->psr.enabled;
  297. if (!intel_dp)
  298. goto unlock;
  299. /*
  300. * The delayed work can race with an invalidate hence we need to
  301. * recheck. Since psr_flush first clears this and then reschedules we
  302. * won't ever miss a flush when bailing out here.
  303. */
  304. if (dev_priv->psr.busy_frontbuffer_bits)
  305. goto unlock;
  306. intel_psr_do_enable(intel_dp);
  307. unlock:
  308. mutex_unlock(&dev_priv->psr.lock);
  309. }
  310. static void intel_psr_exit(struct drm_device *dev)
  311. {
  312. struct drm_i915_private *dev_priv = dev->dev_private;
  313. if (dev_priv->psr.active) {
  314. u32 val = I915_READ(EDP_PSR_CTL(dev));
  315. WARN_ON(!(val & EDP_PSR_ENABLE));
  316. I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
  317. dev_priv->psr.active = false;
  318. }
  319. }
/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_device *dev,
			  unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Kick the hardware out of self-refresh before the write lands. */
	intel_psr_exit(dev);

	/* Only bits on the PSR pipe keep PSR off until the matching flush. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
	mutex_unlock(&dev_priv->psr.lock);
}
/**
 * intel_psr_flush - Flush PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_device *dev,
		     unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* These planes are clean now; clear their busy bits. */
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/*
	 * On Haswell sprite plane updates don't result in a psr invalidating
	 * signal in the hardware. Which means we need to manually fake this in
	 * software for all flushes, not just when we've seen a preceding
	 * invalidation through frontbuffer rendering.
	 */
	if (IS_HASWELL(dev) &&
	    (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
		intel_psr_exit(dev);

	/* Re-enable lazily via the delayed work once everything is idle. */
	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(100));
	mutex_unlock(&dev_priv->psr.lock);
}
  390. /**
  391. * intel_psr_init - Init basic PSR work and mutex.
  392. * @dev: DRM device
  393. *
  394. * This function is called only once at driver load to initialize basic
  395. * PSR stuff.
  396. */
  397. void intel_psr_init(struct drm_device *dev)
  398. {
  399. struct drm_i915_private *dev_priv = dev->dev_private;
  400. INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
  401. mutex_init(&dev_priv->psr.lock);
  402. }