intel_fbc.c

  1. /*
  2. * Copyright © 2014 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21. * DEALINGS IN THE SOFTWARE.
  22. */
  23. /**
  24. * DOC: Frame Buffer Compression (FBC)
  25. *
   26. * FBC tries to save memory bandwidth (and so power consumption) by
   27. * compressing the memory used by the display. It is totally
   28. * transparent to user space and completely handled in the kernel.
   29. *
   30. * The benefits of FBC are mostly visible with solid backgrounds and
   31. * variation-less patterns. They come from keeping the memory footprint small
   32. * and having fewer memory pages opened and accessed for refreshing the display.
   33. *
   34. * i915 is responsible for reserving stolen memory for FBC and configuring its
   35. * offset in the proper registers. The hardware takes care of all the
   36. * compression and decompression. However, there are many known cases where we
   37. * have to forcibly disable it to allow proper screen updates.
  38. */
  39. #include "intel_drv.h"
  40. #include "i915_drv.h"
  41. static inline bool fbc_supported(struct drm_i915_private *dev_priv)
  42. {
  43. return HAS_FBC(dev_priv);
  44. }
  45. static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
  46. {
  47. return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
  48. }
  49. static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
  50. {
  51. return INTEL_INFO(dev_priv)->gen < 4;
  52. }
  53. static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
  54. {
  55. return INTEL_INFO(dev_priv)->gen <= 3;
  56. }
  57. /*
   58. * On some platforms, where the CRTC's x:0/y:0 coordinates don't match the
   59. * frontbuffer's x:0/y:0 coordinates, we lie to the hardware about the plane's
   60. * origin so the x and y offsets can actually fit in the registers. As a
  61. * consequence, the fence doesn't really start exactly at the display plane
  62. * address we program because it starts at the real start of the buffer, so we
  63. * have to take this into consideration here.
  64. */
  65. static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
  66. {
  67. return crtc->base.y - crtc->adjusted_y;
  68. }
  69. /*
  70. * For SKL+, the plane source size used by the hardware is based on the value we
  71. * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
  72. * we wrote to PIPESRC.
  73. */
  74. static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
  75. int *width, int *height)
  76. {
  77. int w, h;
  78. if (intel_rotation_90_or_270(cache->plane.rotation)) {
  79. w = cache->plane.src_h;
  80. h = cache->plane.src_w;
  81. } else {
  82. w = cache->plane.src_w;
  83. h = cache->plane.src_h;
  84. }
  85. if (width)
  86. *width = w;
  87. if (height)
  88. *height = h;
  89. }
  90. static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
  91. struct intel_fbc_state_cache *cache)
  92. {
  93. int lines;
  94. intel_fbc_get_plane_source_size(cache, NULL, &lines);
  95. if (INTEL_INFO(dev_priv)->gen >= 7)
  96. lines = min(lines, 2048);
  97. /* Hardware needs the full buffer stride, not just the active area. */
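/* Example (with an illustrative 1920x1080 XRGB8888 framebuffer): a stride of
 * 7680 bytes needs 1080 * 7680 = 8294400 bytes (~7.9MB) of CFB at compression
 * threshold 1; gen7+ additionally caps the line count at 2048 above. */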
  98. return lines * cache->fb.stride;
  99. }
  100. static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
  101. {
  102. u32 fbc_ctl;
  103. /* Disable compression */
  104. fbc_ctl = I915_READ(FBC_CONTROL);
  105. if ((fbc_ctl & FBC_CTL_EN) == 0)
  106. return;
  107. fbc_ctl &= ~FBC_CTL_EN;
  108. I915_WRITE(FBC_CONTROL, fbc_ctl);
  109. /* Wait for compressing bit to clear */
  110. if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
  111. DRM_DEBUG_KMS("FBC idle timed out\n");
  112. return;
  113. }
  114. }
  115. static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
  116. {
  117. struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
  118. int cfb_pitch;
  119. int i;
  120. u32 fbc_ctl;
  121. /* Note: fbc.threshold == 1 for i8xx */
  122. cfb_pitch = params->cfb_size / FBC_LL_SIZE;
  123. if (params->fb.stride < cfb_pitch)
  124. cfb_pitch = params->fb.stride;
  125. /* FBC_CTL wants 32B or 64B units */
  126. if (IS_GEN2(dev_priv))
  127. cfb_pitch = (cfb_pitch / 32) - 1;
  128. else
  129. cfb_pitch = (cfb_pitch / 64) - 1;
  130. /* Clear old tags */
  131. for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
  132. I915_WRITE(FBC_TAG(i), 0);
  133. if (IS_GEN4(dev_priv)) {
  134. u32 fbc_ctl2;
  135. /* Set it up... */
  136. fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
  137. fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane);
  138. I915_WRITE(FBC_CONTROL2, fbc_ctl2);
  139. I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
  140. }
  141. /* enable it... */
  142. fbc_ctl = I915_READ(FBC_CONTROL);
  143. fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
  144. fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
  145. if (IS_I945GM(dev_priv))
  146. fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
  147. fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
  148. fbc_ctl |= params->fb.fence_reg;
  149. I915_WRITE(FBC_CONTROL, fbc_ctl);
  150. }
  151. static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
  152. {
  153. return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
  154. }
  155. static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
  156. {
  157. struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
  158. u32 dpfc_ctl;
  159. dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
  160. if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
  161. dpfc_ctl |= DPFC_CTL_LIMIT_2X;
  162. else
  163. dpfc_ctl |= DPFC_CTL_LIMIT_1X;
  164. dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
  165. I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
  166. /* enable it... */
  167. I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
  168. }
  169. static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
  170. {
  171. u32 dpfc_ctl;
  172. /* Disable compression */
  173. dpfc_ctl = I915_READ(DPFC_CONTROL);
  174. if (dpfc_ctl & DPFC_CTL_EN) {
  175. dpfc_ctl &= ~DPFC_CTL_EN;
  176. I915_WRITE(DPFC_CONTROL, dpfc_ctl);
  177. }
  178. }
  179. static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
  180. {
  181. return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
  182. }
  183. /* This function forces a CFB recompression through the nuke operation. */
  184. static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
  185. {
  186. I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
  187. POSTING_READ(MSG_FBC_REND_STATE);
  188. }
  189. static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
  190. {
  191. struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
  192. u32 dpfc_ctl;
  193. int threshold = dev_priv->fbc.threshold;
  194. dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
  195. if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
  196. threshold++;
  197. switch (threshold) {
  198. case 4:
  199. case 3:
  200. dpfc_ctl |= DPFC_CTL_LIMIT_4X;
  201. break;
  202. case 2:
  203. dpfc_ctl |= DPFC_CTL_LIMIT_2X;
  204. break;
  205. case 1:
  206. dpfc_ctl |= DPFC_CTL_LIMIT_1X;
  207. break;
  208. }
  209. dpfc_ctl |= DPFC_CTL_FENCE_EN;
  210. if (IS_GEN5(dev_priv))
  211. dpfc_ctl |= params->fb.fence_reg;
  212. I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
  213. I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
  214. /* enable it... */
  215. I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
  216. if (IS_GEN6(dev_priv)) {
  217. I915_WRITE(SNB_DPFC_CTL_SA,
  218. SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
  219. I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
  220. }
  221. intel_fbc_recompress(dev_priv);
  222. }
  223. static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
  224. {
  225. u32 dpfc_ctl;
  226. /* Disable compression */
  227. dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
  228. if (dpfc_ctl & DPFC_CTL_EN) {
  229. dpfc_ctl &= ~DPFC_CTL_EN;
  230. I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
  231. }
  232. }
  233. static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
  234. {
  235. return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
  236. }
  237. static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
  238. {
  239. struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
  240. u32 dpfc_ctl;
  241. int threshold = dev_priv->fbc.threshold;
  242. dpfc_ctl = 0;
  243. if (IS_IVYBRIDGE(dev_priv))
  244. dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);
  245. if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
  246. threshold++;
  247. switch (threshold) {
  248. case 4:
  249. case 3:
  250. dpfc_ctl |= DPFC_CTL_LIMIT_4X;
  251. break;
  252. case 2:
  253. dpfc_ctl |= DPFC_CTL_LIMIT_2X;
  254. break;
  255. case 1:
  256. dpfc_ctl |= DPFC_CTL_LIMIT_1X;
  257. break;
  258. }
  259. dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
  260. if (dev_priv->fbc.false_color)
  261. dpfc_ctl |= FBC_CTL_FALSE_COLOR;
  262. if (IS_IVYBRIDGE(dev_priv)) {
  263. /* WaFbcAsynchFlipDisableFbcQueue:ivb */
  264. I915_WRITE(ILK_DISPLAY_CHICKEN1,
  265. I915_READ(ILK_DISPLAY_CHICKEN1) |
  266. ILK_FBCQ_DIS);
  267. } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
  268. /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
  269. I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
  270. I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
  271. HSW_FBCQ_DIS);
  272. }
  273. I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
  274. I915_WRITE(SNB_DPFC_CTL_SA,
  275. SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
  276. I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
  277. intel_fbc_recompress(dev_priv);
  278. }
  279. static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
  280. {
  281. if (INTEL_INFO(dev_priv)->gen >= 5)
  282. return ilk_fbc_is_active(dev_priv);
  283. else if (IS_GM45(dev_priv))
  284. return g4x_fbc_is_active(dev_priv);
  285. else
  286. return i8xx_fbc_is_active(dev_priv);
  287. }
  288. static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
  289. {
  290. struct intel_fbc *fbc = &dev_priv->fbc;
  291. fbc->active = true;
  292. if (INTEL_INFO(dev_priv)->gen >= 7)
  293. gen7_fbc_activate(dev_priv);
  294. else if (INTEL_INFO(dev_priv)->gen >= 5)
  295. ilk_fbc_activate(dev_priv);
  296. else if (IS_GM45(dev_priv))
  297. g4x_fbc_activate(dev_priv);
  298. else
  299. i8xx_fbc_activate(dev_priv);
  300. }
  301. static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
  302. {
  303. struct intel_fbc *fbc = &dev_priv->fbc;
  304. fbc->active = false;
  305. if (INTEL_INFO(dev_priv)->gen >= 5)
  306. ilk_fbc_deactivate(dev_priv);
  307. else if (IS_GM45(dev_priv))
  308. g4x_fbc_deactivate(dev_priv);
  309. else
  310. i8xx_fbc_deactivate(dev_priv);
  311. }
  312. /**
  313. * intel_fbc_is_active - Is FBC active?
  314. * @dev_priv: i915 device instance
  315. *
  316. * This function is used to verify the current state of FBC.
  317. * FIXME: This should be tracked in the plane config eventually
  318. * instead of queried at runtime for most callers.
  319. */
  320. bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
  321. {
  322. return dev_priv->fbc.active;
  323. }
  324. static void intel_fbc_work_fn(struct work_struct *__work)
  325. {
  326. struct drm_i915_private *dev_priv =
  327. container_of(__work, struct drm_i915_private, fbc.work.work);
  328. struct intel_fbc *fbc = &dev_priv->fbc;
  329. struct intel_fbc_work *work = &fbc->work;
  330. struct intel_crtc *crtc = fbc->crtc;
  331. struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];
  332. if (drm_crtc_vblank_get(&crtc->base)) {
  333. DRM_ERROR("vblank not available for FBC on pipe %c\n",
  334. pipe_name(crtc->pipe));
  335. mutex_lock(&fbc->lock);
  336. work->scheduled = false;
  337. mutex_unlock(&fbc->lock);
  338. return;
  339. }
  340. retry:
   341. /* Delay the actual enabling to let page flipping cease and the
   342. * display settle before starting the compression. Note that
  343. * this delay also serves a second purpose: it allows for a
  344. * vblank to pass after disabling the FBC before we attempt
  345. * to modify the control registers.
  346. *
  347. * WaFbcWaitForVBlankBeforeEnable:ilk,snb
  348. *
  349. * It is also worth mentioning that since work->scheduled_vblank can be
  350. * updated multiple times by the other threads, hitting the timeout is
  351. * not an error condition. We'll just end up hitting the "goto retry"
  352. * case below.
  353. */
  354. wait_event_timeout(vblank->queue,
  355. drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
  356. msecs_to_jiffies(50));
  357. mutex_lock(&fbc->lock);
  358. /* Were we cancelled? */
  359. if (!work->scheduled)
  360. goto out;
  361. /* Were we delayed again while this function was sleeping? */
  362. if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
  363. mutex_unlock(&fbc->lock);
  364. goto retry;
  365. }
  366. intel_fbc_hw_activate(dev_priv);
  367. work->scheduled = false;
  368. out:
  369. mutex_unlock(&fbc->lock);
  370. drm_crtc_vblank_put(&crtc->base);
  371. }
  372. static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
  373. {
  374. struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  375. struct intel_fbc *fbc = &dev_priv->fbc;
  376. struct intel_fbc_work *work = &fbc->work;
  377. WARN_ON(!mutex_is_locked(&fbc->lock));
  378. if (drm_crtc_vblank_get(&crtc->base)) {
  379. DRM_ERROR("vblank not available for FBC on pipe %c\n",
  380. pipe_name(crtc->pipe));
  381. return;
  382. }
  383. /* It is useless to call intel_fbc_cancel_work() or cancel_work() in
  384. * this function since we're not releasing fbc.lock, so it won't have an
  385. * opportunity to grab it to discover that it was cancelled. So we just
   386. * update the expected vblank count. */
  387. work->scheduled = true;
  388. work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
  389. drm_crtc_vblank_put(&crtc->base);
  390. schedule_work(&work->work);
  391. }
  392. static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
  393. {
  394. struct intel_fbc *fbc = &dev_priv->fbc;
  395. WARN_ON(!mutex_is_locked(&fbc->lock));
  396. /* Calling cancel_work() here won't help due to the fact that the work
  397. * function grabs fbc->lock. Just set scheduled to false so the work
  398. * function can know it was cancelled. */
  399. fbc->work.scheduled = false;
  400. if (fbc->active)
  401. intel_fbc_hw_deactivate(dev_priv);
  402. }
  403. static bool multiple_pipes_ok(struct intel_crtc *crtc)
  404. {
  405. struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  406. struct drm_plane *primary = crtc->base.primary;
  407. struct intel_fbc *fbc = &dev_priv->fbc;
  408. enum pipe pipe = crtc->pipe;
  409. /* Don't even bother tracking anything we don't need. */
  410. if (!no_fbc_on_multiple_pipes(dev_priv))
  411. return true;
  412. WARN_ON(!drm_modeset_is_locked(&primary->mutex));
  413. if (to_intel_plane_state(primary->state)->visible)
  414. fbc->visible_pipes_mask |= (1 << pipe);
  415. else
  416. fbc->visible_pipes_mask &= ~(1 << pipe);
   417. return (fbc->visible_pipes_mask & ~(1 << pipe)) == 0;
  418. }
  419. static int find_compression_threshold(struct drm_i915_private *dev_priv,
  420. struct drm_mm_node *node,
  421. int size,
  422. int fb_cpp)
  423. {
  424. int compression_threshold = 1;
  425. int ret;
  426. u64 end;
  427. /* The FBC hardware for BDW/SKL doesn't have access to the stolen
   428. * reserved range size, so it always assumes the maximum (8MB) is used.
  429. * If we enable FBC using a CFB on that memory range we'll get FIFO
  430. * underruns, even if that range is not reserved by the BIOS. */
  431. if (IS_BROADWELL(dev_priv) ||
  432. IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
  433. end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
  434. else
  435. end = dev_priv->gtt.stolen_usable_size;
  436. /* HACK: This code depends on what we will do in *_enable_fbc. If that
  437. * code changes, this code needs to change as well.
  438. *
  439. * The enable_fbc code will attempt to use one of our 2 compression
  440. * thresholds, therefore, in that case, we only have 1 resort.
  441. */
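/* Roughly, for the retry loop below: threshold 1 gets a full-sized (or 2x
 * over-allocated) CFB, threshold 2 gets half of it and threshold 4 a quarter;
 * the *_fbc_activate() functions then program the matching DPFC_CTL_LIMIT_*
 * value for the smaller buffer. */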
  442. /* Try to over-allocate to reduce reallocations and fragmentation. */
  443. ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
  444. 4096, 0, end);
  445. if (ret == 0)
  446. return compression_threshold;
  447. again:
  448. /* HW's ability to limit the CFB is 1:4 */
  449. if (compression_threshold > 4 ||
  450. (fb_cpp == 2 && compression_threshold == 2))
  451. return 0;
  452. ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
  453. 4096, 0, end);
  454. if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
  455. return 0;
  456. } else if (ret) {
  457. compression_threshold <<= 1;
  458. goto again;
  459. } else {
  460. return compression_threshold;
  461. }
  462. }
  463. static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
  464. {
  465. struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  466. struct intel_fbc *fbc = &dev_priv->fbc;
  467. struct drm_mm_node *uninitialized_var(compressed_llb);
  468. int size, fb_cpp, ret;
  469. WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
  470. size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
  471. fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0);
  472. ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
  473. size, fb_cpp);
  474. if (!ret)
  475. goto err_llb;
  476. else if (ret > 1) {
  477. DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
  478. }
  479. fbc->threshold = ret;
  480. if (INTEL_INFO(dev_priv)->gen >= 5)
  481. I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
  482. else if (IS_GM45(dev_priv)) {
  483. I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
  484. } else {
  485. compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
  486. if (!compressed_llb)
  487. goto err_fb;
  488. ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
  489. 4096, 4096);
  490. if (ret)
  491. goto err_fb;
  492. fbc->compressed_llb = compressed_llb;
  493. I915_WRITE(FBC_CFB_BASE,
  494. dev_priv->mm.stolen_base + fbc->compressed_fb.start);
  495. I915_WRITE(FBC_LL_BASE,
  496. dev_priv->mm.stolen_base + compressed_llb->start);
  497. }
  498. DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
  499. fbc->compressed_fb.size, fbc->threshold);
  500. return 0;
  501. err_fb:
  502. kfree(compressed_llb);
  503. i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
  504. err_llb:
  505. pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
  506. return -ENOSPC;
  507. }
  508. static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
  509. {
  510. struct intel_fbc *fbc = &dev_priv->fbc;
  511. if (drm_mm_node_allocated(&fbc->compressed_fb))
  512. i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
  513. if (fbc->compressed_llb) {
  514. i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
  515. kfree(fbc->compressed_llb);
  516. }
  517. }
  518. void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
  519. {
  520. struct intel_fbc *fbc = &dev_priv->fbc;
  521. if (!fbc_supported(dev_priv))
  522. return;
  523. mutex_lock(&fbc->lock);
  524. __intel_fbc_cleanup_cfb(dev_priv);
  525. mutex_unlock(&fbc->lock);
  526. }
  527. static bool stride_is_valid(struct drm_i915_private *dev_priv,
  528. unsigned int stride)
  529. {
  530. /* These should have been caught earlier. */
  531. WARN_ON(stride < 512);
  532. WARN_ON((stride & (64 - 1)) != 0);
  533. /* Below are the additional FBC restrictions. */
  534. if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
  535. return stride == 4096 || stride == 8192;
  536. if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
  537. return false;
  538. if (stride > 16384)
  539. return false;
  540. return true;
  541. }
  542. static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
  543. uint32_t pixel_format)
  544. {
  545. switch (pixel_format) {
  546. case DRM_FORMAT_XRGB8888:
  547. case DRM_FORMAT_XBGR8888:
  548. return true;
  549. case DRM_FORMAT_XRGB1555:
  550. case DRM_FORMAT_RGB565:
  551. /* 16bpp not supported on gen2 */
  552. if (IS_GEN2(dev_priv))
  553. return false;
  554. /* WaFbcOnly1to1Ratio:ctg */
  555. if (IS_G4X(dev_priv))
  556. return false;
  557. return true;
  558. default:
  559. return false;
  560. }
  561. }
  562. /*
  563. * For some reason, the hardware tracking starts looking at whatever we
  564. * programmed as the display plane base address register. It does not look at
  565. * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
  566. * variables instead of just looking at the pipe/plane size.
  567. */
  568. static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
  569. {
  570. struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  571. struct intel_fbc *fbc = &dev_priv->fbc;
  572. unsigned int effective_w, effective_h, max_w, max_h;
  573. if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
  574. max_w = 4096;
  575. max_h = 4096;
  576. } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
  577. max_w = 4096;
  578. max_h = 2048;
  579. } else {
  580. max_w = 2048;
  581. max_h = 1536;
  582. }
  583. intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
  584. &effective_h);
  585. effective_w += crtc->adjusted_x;
  586. effective_h += crtc->adjusted_y;
  587. return effective_w <= max_w && effective_h <= max_h;
  588. }
  589. static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
  590. {
  591. struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  592. struct intel_fbc *fbc = &dev_priv->fbc;
  593. struct intel_fbc_state_cache *cache = &fbc->state_cache;
  594. struct intel_crtc_state *crtc_state =
  595. to_intel_crtc_state(crtc->base.state);
  596. struct intel_plane_state *plane_state =
  597. to_intel_plane_state(crtc->base.primary->state);
  598. struct drm_framebuffer *fb = plane_state->base.fb;
  599. struct drm_i915_gem_object *obj;
  600. WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
  601. WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex));
  602. cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
  603. if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
  604. cache->crtc.hsw_bdw_pixel_rate =
  605. ilk_pipe_pixel_rate(crtc_state);
  606. cache->plane.rotation = plane_state->base.rotation;
  607. cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16;
  608. cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16;
  609. cache->plane.visible = plane_state->visible;
  610. if (!cache->plane.visible)
  611. return;
  612. obj = intel_fb_obj(fb);
  613. /* FIXME: We lack the proper locking here, so only run this on the
   614. * platforms that need it. */
  615. if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7)
  616. cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
  617. cache->fb.pixel_format = fb->pixel_format;
  618. cache->fb.stride = fb->pitches[0];
  619. cache->fb.fence_reg = obj->fence_reg;
  620. cache->fb.tiling_mode = obj->tiling_mode;
  621. }
  622. static bool intel_fbc_can_activate(struct intel_crtc *crtc)
  623. {
  624. struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  625. struct intel_fbc *fbc = &dev_priv->fbc;
  626. struct intel_fbc_state_cache *cache = &fbc->state_cache;
  627. if (!cache->plane.visible) {
  628. fbc->no_fbc_reason = "primary plane not visible";
  629. return false;
  630. }
  631. if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
  632. (cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
  633. fbc->no_fbc_reason = "incompatible mode";
  634. return false;
  635. }
  636. if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
  637. fbc->no_fbc_reason = "mode too large for compression";
  638. return false;
  639. }
  640. /* The use of a CPU fence is mandatory in order to detect writes
  641. * by the CPU to the scanout and trigger updates to the FBC.
  642. */
  643. if (cache->fb.tiling_mode != I915_TILING_X ||
  644. cache->fb.fence_reg == I915_FENCE_REG_NONE) {
  645. fbc->no_fbc_reason = "framebuffer not tiled or fenced";
  646. return false;
  647. }
  648. if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
  649. cache->plane.rotation != BIT(DRM_ROTATE_0)) {
  650. fbc->no_fbc_reason = "rotation unsupported";
  651. return false;
  652. }
  653. if (!stride_is_valid(dev_priv, cache->fb.stride)) {
  654. fbc->no_fbc_reason = "framebuffer stride not supported";
  655. return false;
  656. }
  657. if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) {
  658. fbc->no_fbc_reason = "pixel format is invalid";
  659. return false;
  660. }
  661. /* WaFbcExceedCdClockThreshold:hsw,bdw */
  662. if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
  663. cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) {
  664. fbc->no_fbc_reason = "pixel rate is too big";
  665. return false;
  666. }
   667. /* It is possible for the required CFB size to change without a
  668. * crtc->disable + crtc->enable since it is possible to change the
  669. * stride without triggering a full modeset. Since we try to
  670. * over-allocate the CFB, there's a chance we may keep FBC enabled even
  671. * if this happens, but if we exceed the current CFB size we'll have to
  672. * disable FBC. Notice that it would be possible to disable FBC, wait
  673. * for a frame, free the stolen node, then try to reenable FBC in case
  674. * we didn't get any invalidate/deactivate calls, but this would require
  675. * a lot of tracking just for a specific case. If we conclude it's an
  676. * important case, we can implement it later. */
  677. if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
  678. fbc->compressed_fb.size * fbc->threshold) {
  679. fbc->no_fbc_reason = "CFB requirements changed";
  680. return false;
  681. }
  682. return true;
  683. }
  684. static bool intel_fbc_can_choose(struct intel_crtc *crtc)
  685. {
  686. struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  687. struct intel_fbc *fbc = &dev_priv->fbc;
  688. bool enable_by_default = IS_HASWELL(dev_priv) ||
  689. IS_BROADWELL(dev_priv);
  690. if (intel_vgpu_active(dev_priv->dev)) {
  691. fbc->no_fbc_reason = "VGPU is active";
  692. return false;
  693. }
  694. if (i915.enable_fbc < 0 && !enable_by_default) {
  695. fbc->no_fbc_reason = "disabled per chip default";
  696. return false;
  697. }
  698. if (!i915.enable_fbc) {
  699. fbc->no_fbc_reason = "disabled per module param";
  700. return false;
  701. }
  702. if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
  703. fbc->no_fbc_reason = "no enabled pipes can have FBC";
  704. return false;
  705. }
  706. if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
  707. fbc->no_fbc_reason = "no enabled planes can have FBC";
  708. return false;
  709. }
  710. return true;
  711. }
  712. static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
  713. struct intel_fbc_reg_params *params)
  714. {
  715. struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  716. struct intel_fbc *fbc = &dev_priv->fbc;
  717. struct intel_fbc_state_cache *cache = &fbc->state_cache;
  718. /* Since all our fields are integer types, use memset here so the
  719. * comparison function can rely on memcmp because the padding will be
  720. * zero. */
  721. memset(params, 0, sizeof(*params));
  722. params->crtc.pipe = crtc->pipe;
  723. params->crtc.plane = crtc->plane;
  724. params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
  725. params->fb.pixel_format = cache->fb.pixel_format;
  726. params->fb.stride = cache->fb.stride;
  727. params->fb.fence_reg = cache->fb.fence_reg;
  728. params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
  729. params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
  730. }
  731. static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
  732. struct intel_fbc_reg_params *params2)
  733. {
  734. /* We can use this since intel_fbc_get_reg_params() does a memset. */
  735. return memcmp(params1, params2, sizeof(*params1)) == 0;
  736. }
  737. void intel_fbc_pre_update(struct intel_crtc *crtc)
  738. {
  739. struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  740. struct intel_fbc *fbc = &dev_priv->fbc;
  741. if (!fbc_supported(dev_priv))
  742. return;
  743. mutex_lock(&fbc->lock);
  744. if (!multiple_pipes_ok(crtc)) {
  745. fbc->no_fbc_reason = "more than one pipe active";
  746. goto deactivate;
  747. }
  748. if (!fbc->enabled || fbc->crtc != crtc)
  749. goto unlock;
  750. intel_fbc_update_state_cache(crtc);
  751. deactivate:
  752. intel_fbc_deactivate(dev_priv);
  753. unlock:
  754. mutex_unlock(&fbc->lock);
  755. }
  756. static void __intel_fbc_post_update(struct intel_crtc *crtc)
  757. {
  758. struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  759. struct intel_fbc *fbc = &dev_priv->fbc;
  760. struct intel_fbc_reg_params old_params;
  761. WARN_ON(!mutex_is_locked(&fbc->lock));
  762. if (!fbc->enabled || fbc->crtc != crtc)
  763. return;
  764. if (!intel_fbc_can_activate(crtc)) {
  765. WARN_ON(fbc->active);
  766. return;
  767. }
  768. old_params = fbc->params;
  769. intel_fbc_get_reg_params(crtc, &fbc->params);
  770. /* If the scanout has not changed, don't modify the FBC settings.
  771. * Note that we make the fundamental assumption that the fb->obj
  772. * cannot be unpinned (and have its GTT offset and fence revoked)
  773. * without first being decoupled from the scanout and FBC disabled.
  774. */
  775. if (fbc->active &&
  776. intel_fbc_reg_params_equal(&old_params, &fbc->params))
  777. return;
  778. intel_fbc_deactivate(dev_priv);
  779. intel_fbc_schedule_activation(crtc);
  780. fbc->no_fbc_reason = "FBC enabled (active or scheduled)";
  781. }
  782. void intel_fbc_post_update(struct intel_crtc *crtc)
  783. {
  784. struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  785. struct intel_fbc *fbc = &dev_priv->fbc;
  786. if (!fbc_supported(dev_priv))
  787. return;
  788. mutex_lock(&fbc->lock);
  789. __intel_fbc_post_update(crtc);
  790. mutex_unlock(&fbc->lock);
  791. }
  792. static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
  793. {
  794. if (fbc->enabled)
  795. return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
  796. else
  797. return fbc->possible_framebuffer_bits;
  798. }
  799. void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
  800. unsigned int frontbuffer_bits,
  801. enum fb_op_origin origin)
  802. {
  803. struct intel_fbc *fbc = &dev_priv->fbc;
  804. if (!fbc_supported(dev_priv))
  805. return;
  806. if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
  807. return;
  808. mutex_lock(&fbc->lock);
  809. fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
  810. if (fbc->enabled && fbc->busy_bits)
  811. intel_fbc_deactivate(dev_priv);
  812. mutex_unlock(&fbc->lock);
  813. }
  814. void intel_fbc_flush(struct drm_i915_private *dev_priv,
  815. unsigned int frontbuffer_bits, enum fb_op_origin origin)
  816. {
  817. struct intel_fbc *fbc = &dev_priv->fbc;
  818. if (!fbc_supported(dev_priv))
  819. return;
  820. if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
  821. return;
  822. mutex_lock(&fbc->lock);
  823. fbc->busy_bits &= ~frontbuffer_bits;
  824. if (!fbc->busy_bits && fbc->enabled &&
  825. (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
  826. if (fbc->active)
  827. intel_fbc_recompress(dev_priv);
  828. else
  829. __intel_fbc_post_update(fbc->crtc);
  830. }
  831. mutex_unlock(&fbc->lock);
  832. }
  833. /**
  834. * intel_fbc_choose_crtc - select a CRTC to enable FBC on
  835. * @dev_priv: i915 device instance
  836. * @state: the atomic state structure
  837. *
  838. * This function looks at the proposed state for CRTCs and planes, then chooses
  839. * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
  840. * true.
  841. *
  842. * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
  843. * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
  844. */
  845. void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
  846. struct drm_atomic_state *state)
  847. {
  848. struct intel_fbc *fbc = &dev_priv->fbc;
  849. struct drm_crtc *crtc;
  850. struct drm_crtc_state *crtc_state;
  851. struct drm_plane *plane;
  852. struct drm_plane_state *plane_state;
  853. bool fbc_crtc_present = false;
  854. int i, j;
  855. mutex_lock(&fbc->lock);
  856. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  857. if (fbc->crtc == to_intel_crtc(crtc)) {
  858. fbc_crtc_present = true;
  859. break;
  860. }
  861. }
  862. /* This atomic commit doesn't involve the CRTC currently tied to FBC. */
  863. if (!fbc_crtc_present && fbc->crtc != NULL)
  864. goto out;
  865. /* Simply choose the first CRTC that is compatible and has a visible
  866. * plane. We could go for fancier schemes such as checking the plane
  867. * size, but this would just affect the few platforms that don't tie FBC
  868. * to pipe or plane A. */
  869. for_each_plane_in_state(state, plane, plane_state, i) {
  870. struct intel_plane_state *intel_plane_state =
  871. to_intel_plane_state(plane_state);
  872. if (!intel_plane_state->visible)
  873. continue;
  874. for_each_crtc_in_state(state, crtc, crtc_state, j) {
  875. struct intel_crtc_state *intel_crtc_state =
  876. to_intel_crtc_state(crtc_state);
  877. if (plane_state->crtc != crtc)
  878. continue;
  879. if (!intel_fbc_can_choose(to_intel_crtc(crtc)))
  880. break;
  881. intel_crtc_state->enable_fbc = true;
  882. goto out;
  883. }
  884. }
  885. out:
  886. mutex_unlock(&fbc->lock);
  887. }
  888. /**
   889. * intel_fbc_enable - tries to enable FBC on the CRTC
  890. * @crtc: the CRTC
  891. *
  892. * This function checks if the given CRTC was chosen for FBC, then enables it if
  893. * possible. Notice that it doesn't activate FBC. It is valid to call
  894. * intel_fbc_enable multiple times for the same pipe without an
  895. * intel_fbc_disable in the middle, as long as it is deactivated.
  896. */
  897. void intel_fbc_enable(struct intel_crtc *crtc)
  898. {
  899. struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  900. struct intel_fbc *fbc = &dev_priv->fbc;
  901. if (!fbc_supported(dev_priv))
  902. return;
  903. mutex_lock(&fbc->lock);
  904. if (fbc->enabled) {
  905. WARN_ON(fbc->crtc == NULL);
  906. if (fbc->crtc == crtc) {
  907. WARN_ON(!crtc->config->enable_fbc);
  908. WARN_ON(fbc->active);
  909. }
  910. goto out;
  911. }
  912. if (!crtc->config->enable_fbc)
  913. goto out;
  914. WARN_ON(fbc->active);
  915. WARN_ON(fbc->crtc != NULL);
  916. intel_fbc_update_state_cache(crtc);
  917. if (intel_fbc_alloc_cfb(crtc)) {
  918. fbc->no_fbc_reason = "not enough stolen memory";
  919. goto out;
  920. }
  921. DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
   922. fbc->no_fbc_reason = "FBC enabled but not active yet";
  923. fbc->enabled = true;
  924. fbc->crtc = crtc;
  925. out:
  926. mutex_unlock(&fbc->lock);
  927. }
  928. /**
  929. * __intel_fbc_disable - disable FBC
  930. * @dev_priv: i915 device instance
  931. *
  932. * This is the low level function that actually disables FBC. Callers should
  933. * grab the FBC lock.
  934. */
  935. static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
  936. {
  937. struct intel_fbc *fbc = &dev_priv->fbc;
  938. struct intel_crtc *crtc = fbc->crtc;
  939. WARN_ON(!mutex_is_locked(&fbc->lock));
  940. WARN_ON(!fbc->enabled);
  941. WARN_ON(fbc->active);
  942. WARN_ON(crtc->active);
  943. DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
  944. __intel_fbc_cleanup_cfb(dev_priv);
  945. fbc->enabled = false;
  946. fbc->crtc = NULL;
  947. }
  948. /**
  949. * intel_fbc_disable - disable FBC if it's associated with crtc
  950. * @crtc: the CRTC
  951. *
  952. * This function disables FBC if it's associated with the provided CRTC.
  953. */
  954. void intel_fbc_disable(struct intel_crtc *crtc)
  955. {
  956. struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  957. struct intel_fbc *fbc = &dev_priv->fbc;
  958. if (!fbc_supported(dev_priv))
  959. return;
  960. mutex_lock(&fbc->lock);
  961. if (fbc->crtc == crtc) {
  962. WARN_ON(!fbc->enabled);
  963. WARN_ON(fbc->active);
  964. __intel_fbc_disable(dev_priv);
  965. }
  966. mutex_unlock(&fbc->lock);
  967. cancel_work_sync(&fbc->work.work);
  968. }
  969. /**
  970. * intel_fbc_global_disable - globally disable FBC
  971. * @dev_priv: i915 device instance
  972. *
  973. * This function disables FBC regardless of which CRTC is associated with it.
  974. */
  975. void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
  976. {
  977. struct intel_fbc *fbc = &dev_priv->fbc;
  978. if (!fbc_supported(dev_priv))
  979. return;
  980. mutex_lock(&fbc->lock);
  981. if (fbc->enabled)
  982. __intel_fbc_disable(dev_priv);
  983. mutex_unlock(&fbc->lock);
  984. cancel_work_sync(&fbc->work.work);
  985. }
  986. /**
  987. * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
  988. * @dev_priv: i915 device instance
  989. *
  990. * The FBC code needs to track CRTC visibility since the older platforms can't
  991. * have FBC enabled while multiple pipes are used. This function does the
  992. * initial setup at driver load to make sure FBC is matching the real hardware.
  993. */
  994. void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
  995. {
  996. struct intel_crtc *crtc;
   997. /* Don't even bother tracking anything if we don't need to. */
  998. if (!no_fbc_on_multiple_pipes(dev_priv))
  999. return;
  1000. for_each_intel_crtc(dev_priv->dev, crtc)
  1001. if (intel_crtc_active(&crtc->base) &&
  1002. to_intel_plane_state(crtc->base.primary->state)->visible)
  1003. dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
  1004. }
  1005. /**
  1006. * intel_fbc_init - Initialize FBC
  1007. * @dev_priv: the i915 device
  1008. *
   1009. * This function might be called during the PM init process.
  1010. */
  1011. void intel_fbc_init(struct drm_i915_private *dev_priv)
  1012. {
  1013. struct intel_fbc *fbc = &dev_priv->fbc;
  1014. enum pipe pipe;
  1015. INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
  1016. mutex_init(&fbc->lock);
  1017. fbc->enabled = false;
  1018. fbc->active = false;
  1019. fbc->work.scheduled = false;
  1020. if (!HAS_FBC(dev_priv)) {
  1021. fbc->no_fbc_reason = "unsupported by this chipset";
  1022. return;
  1023. }
  1024. for_each_pipe(dev_priv, pipe) {
  1025. fbc->possible_framebuffer_bits |=
  1026. INTEL_FRONTBUFFER_PRIMARY(pipe);
  1027. if (fbc_on_pipe_a_only(dev_priv))
  1028. break;
  1029. }
  1030. /* This value was pulled out of someone's hat */
  1031. if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv))
  1032. I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
   1033. /* We still don't have any sort of hardware state readout for FBC, so
   1034. * deactivate it in case the BIOS activated it, to make sure the
   1035. * software state matches the hardware state. */
  1036. if (intel_fbc_hw_is_active(dev_priv))
  1037. intel_fbc_hw_deactivate(dev_priv);
  1038. }