/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as
 * the frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows
 * us to power down the link and memory controller. For DSI panels the same
 * idea is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP AUX message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
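
/*
 * Illustrative sketch only (the real call sites live in the frontbuffer
 * tracking code, see intel_frontbuffer.c): CPU rendering to a frontbuffer
 * is expected to be bracketed roughly like this, with a bitmask naming the
 * dirtied planes:
 *
 *	intel_psr_invalidate(dev_priv, frontbuffer_bits);
 *	... CPU renders into the frontbuffer ...
 *	intel_psr_flush(dev_priv, frontbuffer_bits, ORIGIN_CPU);
 *
 * intel_psr_flush() then re-schedules PSR activation from the delayed work
 * once no PSR-relevant bits remain busy.
 */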

#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

static inline enum intel_display_power_domain
psr_aux_domain(struct intel_dp *intel_dp)
{
	/* CNL HW requires corresponding AUX IOs to be powered up for PSR.
	 * However, for non-A AUX ports the corresponding non-EDP transcoders
	 * would have already enabled power well 2 and DC_OFF. This means we can
	 * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
	 * specific AUX_IO reference without powering up any extra wells.
	 * Note that PSR is enabled only on Port A even though this function
	 * returns the correct domain for other ports too.
	 */
	return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
					      intel_dp->aux_power_domain;
}

static void psr_aux_io_power_get(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (INTEL_GEN(dev_priv) < 10)
		return;

	intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
}

static void psr_aux_io_power_put(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (INTEL_GEN(dev_priv) < 10)
		return;

	intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
}

static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
{
	uint8_t psr_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
		return false;
	return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
}

static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	uint8_t dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	uint8_t alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
		dev_priv->psr.sink_support = true;
		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
	}

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
		uint8_t frame_sync_cap;

		dev_priv->psr.sink_support = true;
		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
				      &frame_sync_cap) != 1)
			frame_sync_cap = 0;
		dev_priv->psr.aux_frame_sync = frame_sync_cap & DP_AUX_FRAME_SYNC_CAP;
		/* PSR2 needs frame sync as well */
		dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
		DRM_DEBUG_KMS("PSR2 %s on sink",
			      dev_priv->psr.psr2_support ? "supported" : "not supported");

		if (dev_priv->psr.psr2_support) {
			dev_priv->psr.y_cord_support =
				intel_dp_get_y_cord_status(intel_dp);
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.alpm =
				intel_dp_get_alpm_status(intel_dp);
		}
	}
}

static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t val;

	val = I915_READ(VLV_PSRSTAT(pipe)) &
	      VLV_EDP_PSR_CURR_STATE_MASK;
	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}

static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	uint32_t val;

	/* VLV auto-generates the VSC packet as per eDP 1.3 spec, Table 3.10 */
	val = I915_READ(VLV_VSCSDP(crtc->pipe));
	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
	I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
}
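
/*
 * In the VSC SDP headers below, HB2 carries the packet revision and HB3 the
 * number of valid data bytes: 0x2/0x8 for the plain PSR packet, 0x3/0xc for
 * PSR2 without Y coordinates, 0x4/0xe for PSR2 with Y coordinates, and
 * 0x5/0x13 when colorimetry data is carried as well.
 */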
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	struct edp_vsc_psr psr_vsc;

	if (dev_priv->psr.psr2_support) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support &&
		    dev_priv->psr.y_cord_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else if (dev_priv->psr.y_cord_support) {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		} else {
			psr_vsc.sdp_header.HB2 = 0x3;
			psr_vsc.sdp_header.HB3 = 0xc;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}

static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return DP_AUX_CH_CTL(port);
	else
		return EDP_PSR_AUX_CTL;
}

static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return DP_AUX_CH_DATA(port, index);
	else
		return EDP_PSR_AUX_DATA(index);
}

static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t aux_clock_divider;
	i915_reg_t aux_ctl_reg;
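	/*
	 * This is the native AUX write the hardware itself transmits to wake
	 * the sink on PSR exit: SET_POWER (DPCD 0x600) with a single data
	 * byte of D0. Byte 3 of the header holds "length - 1", hence the
	 * deliberate "1 - 1" for the one-byte payload.
	 */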
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	enum port port = dig_port->base.port;
	u32 aux_ctl;
	int i;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Enable AUX frame sync at sink */
	if (dev_priv->psr.aux_frame_sync)
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
				   DP_AUX_FRAME_SYNC_ENABLE);
	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
	if (dev_priv->psr.link_standby)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
	else
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE);

	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);

	/* Setup AUX registers */
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
					     aux_clock_divider);
	I915_WRITE(aux_ctl_reg, aux_ctl);
}

static void vlv_psr_enable_source(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	/* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
	I915_WRITE(VLV_PSRCTL(crtc->pipe),
		   VLV_EDP_PSR_MODE_SW_TIMER |
		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
		   VLV_EDP_PSR_ENABLE);
}

static void vlv_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/*
	 * Let's do the transition from PSR_state 1 (inactive) to
	 * PSR_state 2 (transition to active - static frame transmission).
	 * Then Hardware is responsible for the transition to
	 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
	 */
	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
		   VLV_EDP_PSR_ACTIVE_ENTRY);
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t max_sleep_time = 0x1f;
	/*
	 * Let's respect VBT in case VBT asks for a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases. Also there are cases
	 * where the sink should be able to train with 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	uint32_t val = EDP_PSR_ENABLE;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;
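
	/*
	 * The TP1 and TP2/TP3 wakeup-time fields below only support steps of
	 * 0, 100, 500 and 2500 us, so the VBT-provided values are in effect
	 * rounded up to the next setting the register can express.
	 */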
	if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
		val |= EDP_PSR_TP1_TIME_2500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
		val |= EDP_PSR_TP1_TIME_500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
		val |= EDP_PSR_TP1_TIME_100us;
	else
		val |= EDP_PSR_TP1_TIME_0us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_0us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	/*
	 * Let's respect VBT in case VBT asks for a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases. Also there are cases
	 * where the sink should be able to train with 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	uint32_t val;
	uint8_t sink_latency;

	val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	/* FIXME: selective update is probably totally broken because it
	 * doesn't mesh at all with our frontbuffer tracking. And the hw
	 * alone isn't good enough. */
	val |= EDP_PSR2_ENABLE |
	       EDP_SU_TRACK_ENABLE;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK,
			      &sink_latency) == 1) {
		sink_latency &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	} else {
		sink_latency = 0;
	}
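
	/*
	 * Program one frame more than the sink's reported resync latency so
	 * the sink has caught back up with the source before the first
	 * selective update is sent.
	 */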
	val |= EDP_PSR2_FRAME_BEFORE_SU(sink_latency + 1);

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR2_TP2_TIME_2500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR2_TP2_TIME_500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR2_TP2_TIME_100;
	else
		val |= EDP_PSR2_TP2_TIME_50;

	I915_WRITE(EDP_PSR2_CTL, val);
}

static void hsw_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* On HSW+, once PSR is enabled on the source the hardware activates
	 * it on its own as soon as the configured idle_frame count is
	 * matched, so all we actually do here is enable it at activation
	 * time.
	 */

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_support)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);
}

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	/*
	 * FIXME psr2_support is messed up. It's both computed
	 * dynamically during PSR enable, and extracted from sink
	 * caps during eDP detection.
	 */
	if (!dev_priv->psr.psr2_support)
		return false;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN9(dev_priv)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	/*
	 * FIXME: enable PSR2 only for Y-coordinate PSR2 panels.
	 * Remove this restriction once the GTC implementation lands.
	 */
	if (!dev_priv->psr.y_cord_support) {
		DRM_DEBUG_KMS("PSR2 not enabled, panel does not support Y coordinate\n");
		return false;
	}

	return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (!i915_modparams.enable_psr) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		return;
	}

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement the transcoder
	 * EDP ones. Since by Display design transcoder EDP is tied to port A
	 * we can safely escape based on port A alone.
	 */
	if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    !dev_priv->psr.link_standby) {
		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
	    S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode is Enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}
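
	/*
	 * Worked example with illustrative numbers: on a 1080p60 CEA mode
	 * (148.5 MHz dot clock, htotal 2200, vtotal 1125) a scanline lasts
	 * ~14.8 us, so even the worst-case 330 us setup time from the eDP
	 * spec needs only ~23 scanlines, which fits within the
	 * 1125 - 1080 - 1 = 44 line vblank budget checked below.
	 */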
	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
	DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.psr2_support)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	else
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	dev_priv->psr.activate(intel_dp);
	dev_priv->psr.active = true;
}

static void hsw_psr_enable_source(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 chicken;

	psr_aux_io_power_get(intel_dp);

	if (dev_priv->psr.psr2_support) {
		chicken = PSR2_VSC_ENABLE_PROG_HEADER;
		if (dev_priv->psr.y_cord_support)
			chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);

		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
	} else {
		/*
		 * Per Spec: Avoid continuous PSR exit by masking MEMUP
		 * and HPD. Also mask LPSP to avoid a dependency on other
		 * drivers that might block runtime_pm, besides preventing
		 * other hw tracking issues now that we can rely on
		 * frontbuffer tracking.
		 */
		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP);
	}
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);
	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	dev_priv->psr.psr2_support = crtc_state->has_psr2;
	dev_priv->psr.busy_frontbuffer_bits = 0;

	dev_priv->psr.setup_vsc(intel_dp, crtc_state);
	dev_priv->psr.enable_sink(intel_dp);
	dev_priv->psr.enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_psr_activate(intel_dp);
	} else {
		/*
		 * FIXME: Activation should happen immediately since this
		 * function is just called after the pipe is fully trained
		 * and enabled.
		 * However on some platforms we face issues when first
		 * activation follows a modeset so quickly.
		 * - On VLV/CHV we get a blank screen on first activation
		 * - On HSW/BDW we get a recoverable frozen screen until
		 *   the next exit-activate sequence.
		 */
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void vlv_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	uint32_t val;

	if (dev_priv->psr.active) {
		/* Put VLV PSR back to PSR_state 0 (disabled). */
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(crtc->pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1))
			WARN(1, "PSR transition took longer than expected\n");

		val = I915_READ(VLV_PSRCTL(crtc->pipe));
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		val &= ~VLV_EDP_PSR_ENABLE;
		val &= ~VLV_EDP_PSR_MODE_MASK;
		I915_WRITE(VLV_PSRCTL(crtc->pipe), val);

		dev_priv->psr.active = false;
	} else {
		WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
	}
}

static void hsw_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.active) {
		i915_reg_t psr_status;
		u32 psr_status_mask;

		if (dev_priv->psr.aux_frame_sync)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
					   0);

		if (dev_priv->psr.psr2_support) {
			psr_status = EDP_PSR2_STATUS;
			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR2_CTL,
				   I915_READ(EDP_PSR2_CTL) &
				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
		} else {
			psr_status = EDP_PSR_STATUS;
			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR_CTL,
				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
		}

		/* Wait till PSR is idle */
		if (intel_wait_for_register(dev_priv,
					    psr_status, psr_status_mask, 0,
					    2000))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		if (dev_priv->psr.psr2_support)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		else
			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	}

	psr_aux_io_power_put(intel_dp);
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	dev_priv->psr.disable_source(intel_dp, old_crtc_state);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	cancel_delayed_work_sync(&dev_priv->psr.work);
}
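
/*
 * Rough call-order sketch only (the exact call sites live in the per-platform
 * modeset code), matching the requirements in the kernel-doc above:
 *
 *	pipe enable -> link training -> intel_psr_enable()
 *	...
 *	intel_psr_disable() -> pipe disable
 */
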
static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* We have to make sure PSR is ready for re-enable
	 * otherwise it stays disabled until the next full enable/disable
	 * cycle. PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support) {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR2_STATUS,
						    EDP_PSR2_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
				return;
			}
		} else {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR_STATUS,
						    EDP_PSR_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
				return;
			}
		}
	} else {
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1)) {
			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
			return;
		}
	}

	mutex_lock(&dev_priv->psr.lock);
	intel_dp = dev_priv->psr.enabled;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.aux_frame_sync)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
					   0);
		if (dev_priv->psr.psr2_support) {
			val = I915_READ(EDP_PSR2_CTL);
			WARN_ON(!(val & EDP_PSR2_ENABLE));
			I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
		} else {
			val = I915_READ(EDP_PSR_CTL);
			WARN_ON(!(val & EDP_PSR_ENABLE));
			I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
		}
	} else {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * Here we do the transition directly from
		 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update)
		 * to PSR_state 5 (exit).
		 * PSR State 4 (active with single frame update) can be
		 * skipped. On PSR_state 5 (exit) Hardware is responsible for
		 * transitioning back to PSR_state 1 (inactive).
		 * We then end up in the same state as after
		 * vlv_psr_enable_source.
		 */
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		I915_WRITE(VLV_PSRCTL(pipe), val);

		/*
		 * Send AUX wake up - Spec says after transitioning to PSR
		 * active we have to send an AUX wake up by writing 01h to
		 * DPCD 600h of the sink device.
		 * XXX: This might slow down the transition, but without this
		 * HW doesn't complete the transition to PSR_state 1 and we
		 * never get the screen updated.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
				   DP_SET_POWER_D0);
	}

	dev_priv->psr.active = false;
}

/**
 * intel_psr_single_frame_update - Single Frame Update
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Some platforms support a single frame update feature that is used to
 * send and update only one frame to the Remote Frame Buffer.
 * So far it is only implemented for Valleyview and Cherryview because
 * hardware requires this to be done before a page flip.
 */
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
				   unsigned frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;
	u32 val;

	if (!CAN_PSR(dev_priv))
		return;

	/*
	 * Single frame update is already supported on BDW+ but it requires
	 * many W/A and it isn't really needed.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * We need to set this bit before writing registers for a
		 * flip. This bit is self-clearing once we get to the PSR
		 * active state.
		 */
		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
	}

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		if (!work_busy(&dev_priv->psr.work.work))
			schedule_delayed_work(&dev_priv->psr.work,
					      msecs_to_jiffies(100));
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	if (!dev_priv->psr.sink_support)
		return;

	/* Per platform default: all disabled. */
	if (i915_modparams.enable_psr == -1)
		i915_modparams.enable_psr = 0;
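
	/*
	 * Summary of the i915.enable_psr values as handled here and in
	 * intel_psr_compute_config(): -1 takes the per-platform default
	 * (currently: disabled everywhere), 0 disables PSR, any other value
	 * enables it, with 2 additionally forcing link standby and 3 forcing
	 * the main link off.
	 */
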
	/* Set link_standby vs. link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		/* On VLV and CHV only standby mode is supported. */
		dev_priv->psr.link_standby = true;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	/* Override link_standby vs. link_off defaults */
	if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
		dev_priv->psr.link_standby = true;
	}
	if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
		dev_priv->psr.link_standby = false;
	}

	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->psr.enable_source = vlv_psr_enable_source;
		dev_priv->psr.disable_source = vlv_psr_disable;
		dev_priv->psr.enable_sink = vlv_psr_enable_sink;
		dev_priv->psr.activate = vlv_psr_activate;
		dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
	} else {
		dev_priv->psr.enable_source = hsw_psr_enable_source;
		dev_priv->psr.disable_source = hsw_psr_disable;
		dev_priv->psr.enable_sink = hsw_psr_enable_sink;
		dev_priv->psr.activate = hsw_psr_activate;
		dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
	}
}