/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns. They come from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the display.
 *
 * i915 is responsible for reserving stolen memory for FBC and programming its
 * offset into the proper registers. The hardware takes care of all the
 * compression/decompression. However, there are many known cases where we have
 * to forcibly disable it to allow proper screen updates.
 */

#include "intel_drv.h"
#include "i915_drv.h"

static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
        return HAS_FBC(dev_priv);
}

static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
{
        return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
}

static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen < 4;
}

static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen <= 3;
}

/*
 * On some platforms, where the CRTC's x:0/y:0 coordinates don't match the
 * frontbuffer's x:0/y:0 coordinates, we lie to the hardware about the plane's
 * origin so the x and y offsets can actually fit the registers. As a
 * consequence, the fence doesn't really start exactly at the display plane
 * address we program because it starts at the real start of the buffer, so we
 * have to take this into consideration here.
 */
static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
{
        return crtc->base.y - crtc->adjusted_y;
}
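
/*
 * Illustrative example (hypothetical numbers, not taken from the driver): if
 * the plane origin we programmed corresponds to crtc->adjusted_y == 96 while
 * the CRTC's real y is crtc->base.y == 100, the fence has to be told to start
 * 100 - 96 = 4 lines into the buffer.
 */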

/*
 * For SKL+, the plane source size used by the hardware is based on the value we
 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
 * we wrote to PIPESRC.
 */
static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
                                            int *width, int *height)
{
        int w, h;

        if (intel_rotation_90_or_270(cache->plane.rotation)) {
                w = cache->plane.src_h;
                h = cache->plane.src_w;
        } else {
                w = cache->plane.src_w;
                h = cache->plane.src_h;
        }

        if (width)
                *width = w;
        if (height)
                *height = h;
}

static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
                                        struct intel_fbc_state_cache *cache)
{
        int lines;

        intel_fbc_get_plane_source_size(cache, NULL, &lines);
        if (INTEL_INFO(dev_priv)->gen >= 7)
                lines = min(lines, 2048);

        /* Hardware needs the full buffer stride, not just the active area. */
        return lines * cache->fb.stride;
}
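
/*
 * For instance (illustrative numbers): a 1080-line plane scanned out of a
 * framebuffer with an 8192-byte stride needs 1080 * 8192 = 8847360 bytes
 * (~8.4 MB) of CFB at compression threshold 1. See
 * find_compression_threshold() below for what happens when stolen memory
 * can't fit that.
 */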

static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
        u32 fbc_ctl;

        /* Disable compression */
        fbc_ctl = I915_READ(FBC_CONTROL);
        if ((fbc_ctl & FBC_CTL_EN) == 0)
                return;

        fbc_ctl &= ~FBC_CTL_EN;
        I915_WRITE(FBC_CONTROL, fbc_ctl);

        /* Wait for compressing bit to clear */
        if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
                DRM_DEBUG_KMS("FBC idle timed out\n");
                return;
        }
}

static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        int cfb_pitch;
        int i;
        u32 fbc_ctl;

        /* Note: fbc.threshold == 1 for i8xx */
        cfb_pitch = params->cfb_size / FBC_LL_SIZE;
        if (params->fb.stride < cfb_pitch)
                cfb_pitch = params->fb.stride;

        /* FBC_CTL wants 32B or 64B units */
        if (IS_GEN2(dev_priv))
                cfb_pitch = (cfb_pitch / 32) - 1;
        else
                cfb_pitch = (cfb_pitch / 64) - 1;

        /* Clear old tags */
        for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
                I915_WRITE(FBC_TAG(i), 0);

        if (IS_GEN4(dev_priv)) {
                u32 fbc_ctl2;

                /* Set it up... */
                fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
                fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane);
                I915_WRITE(FBC_CONTROL2, fbc_ctl2);
                I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
        }

        /* enable it... */
        fbc_ctl = I915_READ(FBC_CONTROL);
        fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
        fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
        if (IS_I945GM(dev_priv))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
        fbc_ctl |= params->fb.fence_reg;
        I915_WRITE(FBC_CONTROL, fbc_ctl);
}
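
/*
 * Worked example of the pitch encoding above (illustrative numbers): on gen3,
 * with params->fb.stride == 2048 and a CFB large enough that
 * params->cfb_size / FBC_LL_SIZE >= 2048, the value programmed into the
 * FBC_CTL stride field is 2048 / 64 - 1 = 31.
 */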

static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
        return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        u32 dpfc_ctl;

        dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
        if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
        else
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;

        dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;

        I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);

        /* enable it... */
        I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}

static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
        u32 dpfc_ctl;

        /* Disable compression */
        dpfc_ctl = I915_READ(DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(DPFC_CONTROL, dpfc_ctl);
        }
}

static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
        return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
        I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
        POSTING_READ(MSG_FBC_REND_STATE);
}

static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        u32 dpfc_ctl;
        int threshold = dev_priv->fbc.threshold;

        dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
        if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
                threshold++;

        switch (threshold) {
        case 4:
        case 3:
                dpfc_ctl |= DPFC_CTL_LIMIT_4X;
                break;
        case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
                break;
        case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
                break;
        }

        dpfc_ctl |= DPFC_CTL_FENCE_EN;
        if (IS_GEN5(dev_priv))
                dpfc_ctl |= params->fb.fence_reg;

        I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
        I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        if (IS_GEN6(dev_priv)) {
                I915_WRITE(SNB_DPFC_CTL_SA,
                           SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
        }

        intel_fbc_recompress(dev_priv);
}

static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
        u32 dpfc_ctl;

        /* Disable compression */
        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
        }
}

static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
        return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        u32 dpfc_ctl;
        int threshold = dev_priv->fbc.threshold;

        dpfc_ctl = 0;
        if (IS_IVYBRIDGE(dev_priv))
                dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);

        if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
                threshold++;

        switch (threshold) {
        case 4:
        case 3:
                dpfc_ctl |= DPFC_CTL_LIMIT_4X;
                break;
        case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
                break;
        case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
                break;
        }

        dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

        if (dev_priv->fbc.false_color)
                dpfc_ctl |= FBC_CTL_FALSE_COLOR;

        if (IS_IVYBRIDGE(dev_priv)) {
                /* WaFbcAsynchFlipDisableFbcQueue:ivb */
                I915_WRITE(ILK_DISPLAY_CHICKEN1,
                           I915_READ(ILK_DISPLAY_CHICKEN1) |
                           ILK_FBCQ_DIS);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
                I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
                           I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
                           HSW_FBCQ_DIS);
        }

        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        I915_WRITE(SNB_DPFC_CTL_SA,
                   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
        I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);

        intel_fbc_recompress(dev_priv);
}

static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
        if (INTEL_INFO(dev_priv)->gen >= 5)
                return ilk_fbc_is_active(dev_priv);
        else if (IS_GM45(dev_priv))
                return g4x_fbc_is_active(dev_priv);
        else
                return i8xx_fbc_is_active(dev_priv);
}

static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        fbc->active = true;

        if (INTEL_INFO(dev_priv)->gen >= 7)
                gen7_fbc_activate(dev_priv);
        else if (INTEL_INFO(dev_priv)->gen >= 5)
                ilk_fbc_activate(dev_priv);
        else if (IS_GM45(dev_priv))
                g4x_fbc_activate(dev_priv);
        else
                i8xx_fbc_activate(dev_priv);
}

static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        fbc->active = false;

        if (INTEL_INFO(dev_priv)->gen >= 5)
                ilk_fbc_deactivate(dev_priv);
        else if (IS_GM45(dev_priv))
                g4x_fbc_deactivate(dev_priv);
        else
                i8xx_fbc_deactivate(dev_priv);
}
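
/*
 * Dispatch summary for the helpers above: gen7+ activates through
 * gen7_fbc_activate(), ILK/SNB through ilk_fbc_activate(), GM45 through
 * g4x_fbc_activate(), and everything older through i8xx_fbc_activate(). There
 * is no gen7-specific deactivate/is_active helper: gen7+ reuses the
 * ILK_DPFC_CONTROL based ilk_fbc_*() variants for those.
 */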

/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
 */
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
        return dev_priv->fbc.active;
}

static void intel_fbc_work_fn(struct work_struct *__work)
{
        struct drm_i915_private *dev_priv =
                container_of(__work, struct drm_i915_private, fbc.work.work);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_work *work = &fbc->work;
        struct intel_crtc *crtc = fbc->crtc;
        struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];

        if (drm_crtc_vblank_get(&crtc->base)) {
                DRM_ERROR("vblank not available for FBC on pipe %c\n",
                          pipe_name(crtc->pipe));

                mutex_lock(&fbc->lock);
                work->scheduled = false;
                mutex_unlock(&fbc->lock);
                return;
        }

retry:
        /* Delay the actual enabling to let pageflipping cease and the
         * display to settle before starting the compression. Note that
         * this delay also serves a second purpose: it allows for a
         * vblank to pass after disabling the FBC before we attempt
         * to modify the control registers.
         *
         * WaFbcWaitForVBlankBeforeEnable:ilk,snb
         *
         * It is also worth mentioning that since work->scheduled_vblank can be
         * updated multiple times by the other threads, hitting the timeout is
         * not an error condition. We'll just end up hitting the "goto retry"
         * case below.
         */
        wait_event_timeout(vblank->queue,
                drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
                msecs_to_jiffies(50));

        mutex_lock(&fbc->lock);

        /* Were we cancelled? */
        if (!work->scheduled)
                goto out;

        /* Were we delayed again while this function was sleeping? */
        if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
                mutex_unlock(&fbc->lock);
                goto retry;
        }

        intel_fbc_hw_activate(dev_priv);

        work->scheduled = false;

out:
        mutex_unlock(&fbc->lock);
        drm_crtc_vblank_put(&crtc->base);
}

static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_work *work = &fbc->work;

        WARN_ON(!mutex_is_locked(&fbc->lock));

        if (drm_crtc_vblank_get(&crtc->base)) {
                DRM_ERROR("vblank not available for FBC on pipe %c\n",
                          pipe_name(crtc->pipe));
                return;
        }

        /* It is useless to call intel_fbc_cancel_work() or cancel_work() in
         * this function since we're not releasing fbc.lock, so it won't have an
         * opportunity to grab it to discover that it was cancelled. So we just
         * update the expected vblank count. */
        work->scheduled = true;
        work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
        drm_crtc_vblank_put(&crtc->base);

        schedule_work(&work->work);
}
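
/*
 * The handshake between intel_fbc_schedule_activation() and
 * intel_fbc_work_fn() is intentionally minimal: the scheduler records the
 * current vblank count in work->scheduled_vblank and sets work->scheduled,
 * while the worker only touches the hardware once it observes a newer vblank
 * count with work->scheduled still true. Clearing work->scheduled (see
 * intel_fbc_deactivate() below) is therefore enough to cancel a pending
 * activation.
 */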

static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        WARN_ON(!mutex_is_locked(&fbc->lock));

        /* Calling cancel_work() here won't help due to the fact that the work
         * function grabs fbc->lock. Just set scheduled to false so the work
         * function can know it was cancelled. */
        fbc->work.scheduled = false;

        if (fbc->active)
                intel_fbc_hw_deactivate(dev_priv);
}

static bool multiple_pipes_ok(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct drm_plane *primary = crtc->base.primary;
        struct intel_fbc *fbc = &dev_priv->fbc;
        enum pipe pipe = crtc->pipe;

        /* Don't even bother tracking anything we don't need. */
        if (!no_fbc_on_multiple_pipes(dev_priv))
                return true;

        WARN_ON(!drm_modeset_is_locked(&primary->mutex));

        if (to_intel_plane_state(primary->state)->visible)
                fbc->visible_pipes_mask |= (1 << pipe);
        else
                fbc->visible_pipes_mask &= ~(1 << pipe);

        /* Multiple pipes are "ok" only when no other pipe has a visible
         * primary plane. */
        return (fbc->visible_pipes_mask & ~(1 << pipe)) == 0;
}

static int find_compression_threshold(struct drm_i915_private *dev_priv,
                                      struct drm_mm_node *node,
                                      int size,
                                      int fb_cpp)
{
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        int compression_threshold = 1;
        int ret;
        u64 end;

        /* The FBC hardware for BDW/SKL doesn't have access to the stolen
         * reserved range size, so it always assumes the maximum (8MB) is used.
         * If we enable FBC using a CFB on that memory range we'll get FIFO
         * underruns, even if that range is not reserved by the BIOS. */
        if (IS_BROADWELL(dev_priv) ||
            IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
                end = ggtt->stolen_size - 8 * 1024 * 1024;
        else
                end = ggtt->stolen_usable_size;

        /* HACK: This code depends on what we will do in *_enable_fbc. If that
         * code changes, this code needs to change as well.
         *
         * The enable_fbc code will attempt to use one of our 2 compression
         * thresholds, therefore, in that case, we only have 1 resort.
         */

        /* Try to over-allocate to reduce reallocations and fragmentation. */
        ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
                                                   4096, 0, end);
        if (ret == 0)
                return compression_threshold;

again:
        /* HW's ability to limit the CFB is 1:4 */
        if (compression_threshold > 4 ||
            (fb_cpp == 2 && compression_threshold == 2))
                return 0;

        ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
                                                   4096, 0, end);
        if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
                return 0;
        } else if (ret) {
                compression_threshold <<= 1;
                goto again;
        } else {
                return compression_threshold;
        }
}
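
/*
 * In other words, for a requested CFB size S the allocation attempts above are
 * 2*S (threshold 1), S (threshold 1), S/2 (threshold 2) and S/4 (threshold 4),
 * subject to the early bail-outs coded above for gen4- and 16bpp. The
 * resulting fbc->threshold is what selects DPFC_CTL_LIMIT_1X/2X/4X in the
 * activate functions.
 */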

static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct drm_mm_node *uninitialized_var(compressed_llb);
        int size, fb_cpp, ret;

        WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));

        size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
        fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0);

        ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
                                         size, fb_cpp);
        if (!ret)
                goto err_llb;
        else if (ret > 1) {
                DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
        }

        fbc->threshold = ret;

        if (INTEL_INFO(dev_priv)->gen >= 5)
                I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
        else if (IS_GM45(dev_priv)) {
                I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
        } else {
                compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
                if (!compressed_llb)
                        goto err_fb;

                ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
                                                  4096, 4096);
                if (ret)
                        goto err_fb;

                fbc->compressed_llb = compressed_llb;

                I915_WRITE(FBC_CFB_BASE,
                           dev_priv->mm.stolen_base + fbc->compressed_fb.start);
                I915_WRITE(FBC_LL_BASE,
                           dev_priv->mm.stolen_base + compressed_llb->start);
        }

        DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
                      fbc->compressed_fb.size, fbc->threshold);

        return 0;

err_fb:
        kfree(compressed_llb);
        i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
        pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
        return -ENOSPC;
}

static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (drm_mm_node_allocated(&fbc->compressed_fb))
                i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);

        if (fbc->compressed_llb) {
                i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
                kfree(fbc->compressed_llb);
        }
}

void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        mutex_lock(&fbc->lock);
        __intel_fbc_cleanup_cfb(dev_priv);
        mutex_unlock(&fbc->lock);
}

static bool stride_is_valid(struct drm_i915_private *dev_priv,
                            unsigned int stride)
{
        /* These should have been caught earlier. */
        WARN_ON(stride < 512);
        WARN_ON((stride & (64 - 1)) != 0);

        /* Below are the additional FBC restrictions. */

        if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
                return stride == 4096 || stride == 8192;

        if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
                return false;

        if (stride > 16384)
                return false;

        return true;
}

static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
                                  uint32_t pixel_format)
{
        switch (pixel_format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
                return true;
        case DRM_FORMAT_XRGB1555:
        case DRM_FORMAT_RGB565:
                /* 16bpp not supported on gen2 */
                if (IS_GEN2(dev_priv))
                        return false;
                /* WaFbcOnly1to1Ratio:ctg */
                if (IS_G4X(dev_priv))
                        return false;
                return true;
        default:
                return false;
        }
}
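
/*
 * Quick reference for the two checks above: gen2/3 only accept strides of
 * exactly 4096 or 8192 bytes, gen4 parts other than G4X additionally reject
 * strides below 2048, and everything else is capped at 16384. Format-wise,
 * XRGB8888/XBGR8888 are accepted everywhere, while XRGB1555 and RGB565 are
 * rejected on gen2 and on G4X (WaFbcOnly1to1Ratio:ctg).
 */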

/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
 * variables instead of just looking at the pipe/plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_fbc *fbc = &dev_priv->fbc;
        unsigned int effective_w, effective_h, max_w, max_h;

        if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
                max_w = 4096;
                max_h = 4096;
        } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
                max_w = 4096;
                max_h = 2048;
        } else {
                max_w = 2048;
                max_h = 1536;
        }

        intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
                                        &effective_h);
        effective_w += crtc->adjusted_x;
        effective_h += crtc->adjusted_y;

        return effective_w <= max_w && effective_h <= max_h;
}
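
/*
 * Example (illustrative numbers): on a gen4 part other than G4X the limit is
 * 2048x1536, so a 1920x1080 plane fits, but the same plane scanned out with
 * crtc->adjusted_x == 200 has an effective width of 2120 and FBC cannot be
 * activated.
 */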

static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_plane_state *plane_state =
                to_intel_plane_state(crtc->base.primary->state);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj;

        WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
        WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex));

        cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                cache->crtc.hsw_bdw_pixel_rate =
                        ilk_pipe_pixel_rate(crtc_state);

        cache->plane.rotation = plane_state->base.rotation;
        cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16;
        cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16;
        cache->plane.visible = plane_state->visible;

        if (!cache->plane.visible)
                return;

        obj = intel_fb_obj(fb);

        /* FIXME: We lack the proper locking here, so only run this on the
         * platforms that need it. */
        if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7)
                cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
        cache->fb.pixel_format = fb->pixel_format;
        cache->fb.stride = fb->pitches[0];
        cache->fb.fence_reg = obj->fence_reg;
        cache->fb.tiling_mode = obj->tiling_mode;
}

static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;

        if (!cache->plane.visible) {
                fbc->no_fbc_reason = "primary plane not visible";
                return false;
        }

        if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
            (cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
                fbc->no_fbc_reason = "incompatible mode";
                return false;
        }

        if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
                fbc->no_fbc_reason = "mode too large for compression";
                return false;
        }

        /* The use of a CPU fence is mandatory in order to detect writes
         * by the CPU to the scanout and trigger updates to the FBC.
         */
        if (cache->fb.tiling_mode != I915_TILING_X ||
            cache->fb.fence_reg == I915_FENCE_REG_NONE) {
                fbc->no_fbc_reason = "framebuffer not tiled or fenced";
                return false;
        }

        if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
            cache->plane.rotation != BIT(DRM_ROTATE_0)) {
                fbc->no_fbc_reason = "rotation unsupported";
                return false;
        }

        if (!stride_is_valid(dev_priv, cache->fb.stride)) {
                fbc->no_fbc_reason = "framebuffer stride not supported";
                return false;
        }

        if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) {
                fbc->no_fbc_reason = "pixel format is invalid";
                return false;
        }

        /* WaFbcExceedCdClockThreshold:hsw,bdw */
        if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
            cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) {
                fbc->no_fbc_reason = "pixel rate is too big";
                return false;
        }

        /* It is possible for the required CFB size to change without a
         * crtc->disable + crtc->enable since it is possible to change the
         * stride without triggering a full modeset. Since we try to
         * over-allocate the CFB, there's a chance we may keep FBC enabled even
         * if this happens, but if we exceed the current CFB size we'll have to
         * disable FBC. Notice that it would be possible to disable FBC, wait
         * for a frame, free the stolen node, then try to reenable FBC in case
         * we didn't get any invalidate/deactivate calls, but this would require
         * a lot of tracking just for a specific case. If we conclude it's an
         * important case, we can implement it later. */
        if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
            fbc->compressed_fb.size * fbc->threshold) {
                fbc->no_fbc_reason = "CFB requirements changed";
                return false;
        }

        return true;
}

static bool intel_fbc_can_choose(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_fbc *fbc = &dev_priv->fbc;
        bool enable_by_default = IS_HASWELL(dev_priv) ||
                                 IS_BROADWELL(dev_priv);

        if (intel_vgpu_active(dev_priv->dev)) {
                fbc->no_fbc_reason = "VGPU is active";
                return false;
        }

        if (i915.enable_fbc < 0 && !enable_by_default) {
                fbc->no_fbc_reason = "disabled per chip default";
                return false;
        }

        if (!i915.enable_fbc) {
                fbc->no_fbc_reason = "disabled per module param";
                return false;
        }

        if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
                fbc->no_fbc_reason = "no enabled pipes can have FBC";
                return false;
        }

        if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
                fbc->no_fbc_reason = "no enabled planes can have FBC";
                return false;
        }

        return true;
}

static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
                                     struct intel_fbc_reg_params *params)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;

        /* Since all our fields are integer types, use memset here so the
         * comparison function can rely on memcmp because the padding will be
         * zero. */
        memset(params, 0, sizeof(*params));

        params->crtc.pipe = crtc->pipe;
        params->crtc.plane = crtc->plane;
        params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);

        params->fb.pixel_format = cache->fb.pixel_format;
        params->fb.stride = cache->fb.stride;
        params->fb.fence_reg = cache->fb.fence_reg;

        params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

        params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
}

static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
                                       struct intel_fbc_reg_params *params2)
{
        /* We can use this since intel_fbc_get_reg_params() does a memset. */
        return memcmp(params1, params2, sizeof(*params1)) == 0;
}

void intel_fbc_pre_update(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        mutex_lock(&fbc->lock);

        if (!multiple_pipes_ok(crtc)) {
                fbc->no_fbc_reason = "more than one pipe active";
                goto deactivate;
        }

        if (!fbc->enabled || fbc->crtc != crtc)
                goto unlock;

        intel_fbc_update_state_cache(crtc);

deactivate:
        intel_fbc_deactivate(dev_priv);
unlock:
        mutex_unlock(&fbc->lock);
}

static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_reg_params old_params;

        WARN_ON(!mutex_is_locked(&fbc->lock));

        if (!fbc->enabled || fbc->crtc != crtc)
                return;

        if (!intel_fbc_can_activate(crtc)) {
                WARN_ON(fbc->active);
                return;
        }

        old_params = fbc->params;
        intel_fbc_get_reg_params(crtc, &fbc->params);

        /* If the scanout has not changed, don't modify the FBC settings.
         * Note that we make the fundamental assumption that the fb->obj
         * cannot be unpinned (and have its GTT offset and fence revoked)
         * without first being decoupled from the scanout and FBC disabled.
         */
        if (fbc->active &&
            intel_fbc_reg_params_equal(&old_params, &fbc->params))
                return;

        intel_fbc_deactivate(dev_priv);
        intel_fbc_schedule_activation(crtc);
        fbc->no_fbc_reason = "FBC enabled (active or scheduled)";
}

void intel_fbc_post_update(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        mutex_lock(&fbc->lock);
        __intel_fbc_post_update(crtc);
        mutex_unlock(&fbc->lock);
}

static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
        if (fbc->enabled)
                return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
        else
                return fbc->possible_framebuffer_bits;
}

void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
                          unsigned int frontbuffer_bits,
                          enum fb_op_origin origin)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
                return;

        mutex_lock(&fbc->lock);

        fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

        if (fbc->enabled && fbc->busy_bits)
                intel_fbc_deactivate(dev_priv);

        mutex_unlock(&fbc->lock);
}

void intel_fbc_flush(struct drm_i915_private *dev_priv,
                     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
                return;

        mutex_lock(&fbc->lock);

        fbc->busy_bits &= ~frontbuffer_bits;

        if (!fbc->busy_bits && fbc->enabled &&
            (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
                if (fbc->active)
                        intel_fbc_recompress(dev_priv);
                else
                        __intel_fbc_post_update(fbc->crtc);
        }

        mutex_unlock(&fbc->lock);
}
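
/*
 * Frontbuffer tracking summary: intel_fbc_invalidate() accumulates the dirtied
 * frontbuffer bits relevant to FBC in fbc->busy_bits and deactivates the
 * hardware, while intel_fbc_flush() clears them again and, once no relevant
 * bits remain busy, either nukes the CFB (if FBC is still active) or re-runs
 * the post-update path to schedule a fresh activation.
 */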

/**
 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
 * @dev_priv: i915 device instance
 * @state: the atomic state structure
 *
 * This function looks at the proposed state for CRTCs and planes, then chooses
 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
 * true.
 *
 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
 * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
 */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
                           struct drm_atomic_state *state)
{
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        struct drm_plane *plane;
        struct drm_plane_state *plane_state;
        bool fbc_crtc_present = false;
        int i, j;

        mutex_lock(&fbc->lock);

        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                if (fbc->crtc == to_intel_crtc(crtc)) {
                        fbc_crtc_present = true;
                        break;
                }
        }
        /* This atomic commit doesn't involve the CRTC currently tied to FBC. */
        if (!fbc_crtc_present && fbc->crtc != NULL)
                goto out;

        /* Simply choose the first CRTC that is compatible and has a visible
         * plane. We could go for fancier schemes such as checking the plane
         * size, but this would just affect the few platforms that don't tie FBC
         * to pipe or plane A. */
        for_each_plane_in_state(state, plane, plane_state, i) {
                struct intel_plane_state *intel_plane_state =
                        to_intel_plane_state(plane_state);

                if (!intel_plane_state->visible)
                        continue;

                for_each_crtc_in_state(state, crtc, crtc_state, j) {
                        struct intel_crtc_state *intel_crtc_state =
                                to_intel_crtc_state(crtc_state);

                        if (plane_state->crtc != crtc)
                                continue;

                        if (!intel_fbc_can_choose(to_intel_crtc(crtc)))
                                break;

                        intel_crtc_state->enable_fbc = true;
                        goto out;
                }
        }

out:
        mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_enable - tries to enable FBC on the CRTC
 * @crtc: the CRTC
 *
 * This function checks if the given CRTC was chosen for FBC, then enables it if
 * possible. Notice that it doesn't activate FBC. It is valid to call
 * intel_fbc_enable multiple times for the same pipe without an
 * intel_fbc_disable in the middle, as long as it is deactivated.
 */
void intel_fbc_enable(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        mutex_lock(&fbc->lock);

        if (fbc->enabled) {
                WARN_ON(fbc->crtc == NULL);
                if (fbc->crtc == crtc) {
                        WARN_ON(!crtc->config->enable_fbc);
                        WARN_ON(fbc->active);
                }
                goto out;
        }

        if (!crtc->config->enable_fbc)
                goto out;

        WARN_ON(fbc->active);
        WARN_ON(fbc->crtc != NULL);

        intel_fbc_update_state_cache(crtc);
        if (intel_fbc_alloc_cfb(crtc)) {
                fbc->no_fbc_reason = "not enough stolen memory";
                goto out;
        }

        DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
        fbc->no_fbc_reason = "FBC enabled but not active yet";

        fbc->enabled = true;
        fbc->crtc = crtc;
out:
        mutex_unlock(&fbc->lock);
}
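
/*
 * Note the enable/activate split used throughout this file: intel_fbc_enable()
 * only ties FBC to a CRTC, caches the plane state and allocates the CFB;
 * actually programming the hardware happens later through the activation work
 * scheduled from __intel_fbc_post_update().
 */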

/**
 * __intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This is the low level function that actually disables FBC. Callers should
 * grab the FBC lock.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_crtc *crtc = fbc->crtc;

        WARN_ON(!mutex_is_locked(&fbc->lock));
        WARN_ON(!fbc->enabled);
        WARN_ON(fbc->active);
        WARN_ON(crtc->active);

        DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));

        __intel_fbc_cleanup_cfb(dev_priv);

        fbc->enabled = false;
        fbc->crtc = NULL;
}

/**
 * intel_fbc_disable - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
void intel_fbc_disable(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        mutex_lock(&fbc->lock);
        if (fbc->crtc == crtc) {
                WARN_ON(!fbc->enabled);
                WARN_ON(fbc->active);
                __intel_fbc_disable(dev_priv);
        }
        mutex_unlock(&fbc->lock);

        cancel_work_sync(&fbc->work.work);
}

/**
 * intel_fbc_global_disable - globally disable FBC
 * @dev_priv: i915 device instance
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        mutex_lock(&fbc->lock);
        if (fbc->enabled)
                __intel_fbc_disable(dev_priv);
        mutex_unlock(&fbc->lock);

        cancel_work_sync(&fbc->work.work);
}

/**
 * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
 * @dev_priv: i915 device instance
 *
 * The FBC code needs to track CRTC visibility since the older platforms can't
 * have FBC enabled while multiple pipes are used. This function does the
 * initial setup at driver load to make sure FBC is matching the real hardware.
 */
void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
{
        struct intel_crtc *crtc;

        /* Don't even bother tracking anything if we don't need to. */
        if (!no_fbc_on_multiple_pipes(dev_priv))
                return;

        for_each_intel_crtc(dev_priv->dev, crtc)
                if (intel_crtc_active(&crtc->base) &&
                    to_intel_plane_state(crtc->base.primary->state)->visible)
                        dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}

/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;
        enum pipe pipe;

        INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
        mutex_init(&fbc->lock);
        fbc->enabled = false;
        fbc->active = false;
        fbc->work.scheduled = false;

        if (!HAS_FBC(dev_priv)) {
                fbc->no_fbc_reason = "unsupported by this chipset";
                return;
        }

        for_each_pipe(dev_priv, pipe) {
                fbc->possible_framebuffer_bits |=
                                INTEL_FRONTBUFFER_PRIMARY(pipe);

                if (fbc_on_pipe_a_only(dev_priv))
                        break;
        }

        /* This value was pulled out of someone's hat */
        if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv))
                I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);

        /* We still don't have any sort of hardware state readout for FBC, so
         * deactivate it in case the BIOS activated it to make sure software
         * matches the hardware state. */
        if (intel_fbc_hw_is_active(dev_priv))
                intel_fbc_hw_deactivate(dev_priv);
}