/* tilcdc_crtc.c */
/*
 * Copyright (C) 2012 Texas Instruments
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/of_graph.h>
#include <linux/math64.h>

#include "tilcdc_drv.h"
#include "tilcdc_regs.h"

#define TILCDC_VBLANK_SAFETY_THRESHOLD_US	1000
#define TILCDC_PALETTE_SIZE			32
#define TILCDC_PALETTE_FIRST_ENTRY		0x4000
/* Per-CRTC driver state for the TI LCDC display controller. */
struct tilcdc_crtc {
	struct drm_crtc base;		/* embedded DRM CRTC; see to_tilcdc_crtc() */

	struct drm_plane primary;	/* the single (primary) plane of this CRTC */

	const struct tilcdc_panel_info *info;	/* panel timings/config, set via tilcdc_crtc_set_panel_info() */
	struct drm_pending_vblank_event *event;	/* pending page-flip event, sent from the EOF interrupt */
	struct mutex enable_lock;	/* serializes enable/disable; protects 'enabled' and 'shutdown' */
	bool enabled;
	bool shutdown;			/* once set, the CRTC refuses to be re-enabled */
	wait_queue_head_t frame_done_wq;	/* woken when 'frame_done' becomes true (raster drained) */
	bool frame_done;
	spinlock_t irq_lock;		/* protects last_vblank/next_fb and scanout programming vs. the IRQ */

	unsigned int lcd_fck_rate;	/* cached LCD functional clock rate (Hz), see tilcdc_crtc_update_clk() */

	ktime_t last_vblank;		/* timestamp of the last end-of-frame interrupt */
	unsigned int hvtotal_us;	/* duration of one full frame in microseconds */
	struct drm_framebuffer *curr_fb;	/* fb currently scanned out */
	struct drm_framebuffer *next_fb;	/* fb deferred to the next vblank (flip raced too close) */

	/* for deferred fb unref's: */
	struct drm_flip_work unref_work;

	/* Only set if an external encoder is connected */
	bool simulate_vesa_sync;

	int sync_lost_count;		/* consecutive SYNC_LOST irqs; recovery kicks in past a limit */
	bool frame_intact;
	struct work_struct recover_work;	/* full disable/enable cycle to recover from sync loss */

	dma_addr_t palette_dma_handle;	/* DMA address of the 32-byte dummy palette */
	u16 *palette_base;		/* CPU address of the palette buffer */
	struct completion palette_loaded;	/* completed by the palette-loaded interrupt */
};

#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
/*
 * drm_flip_work callback: release one deferred framebuffer reference.
 * Runs on the driver workqueue so the unref happens outside IRQ context.
 */
static void unref_worker(struct drm_flip_work *work, void *val)
{
	struct tilcdc_crtc *tilcdc_crtc =
		container_of(work, struct tilcdc_crtc, unref_work);
	struct drm_device *dev = tilcdc_crtc->base.dev;

	/* serialize the unref against concurrent modeset paths */
	mutex_lock(&dev->mode_config.mutex);
	drm_framebuffer_put(val);
	mutex_unlock(&dev->mode_config.mutex);
}
/*
 * Point the LCDC DMA engine at @fb: program the frame-buffer base and
 * ceiling addresses and queue the previously scanned-out fb for a
 * deferred unref. Callers hold irq_lock or run before the raster starts.
 */
static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct drm_gem_cma_object *gem;
	dma_addr_t start, end;
	u64 dma_base_and_ceiling;

	gem = drm_fb_cma_get_gem_obj(fb, 0);

	/* start of the visible region: offset by the CRTC x/y panning */
	start = gem->paddr + fb->offsets[0] +
		crtc->y * fb->pitches[0] +
		crtc->x * fb->format->cpp[0];

	end = start + (crtc->mode.vdisplay * fb->pitches[0]);

	/* Write LCDC_DMA_FB_BASE_ADDR_0_REG and LCDC_DMA_FB_CEILING_ADDR_0_REG
	 * with a single instruction, if available. This should make it more
	 * unlikely that LCDC would fetch the DMA addresses in the middle of
	 * an update.
	 */
	if (priv->rev == 1)
		end -= 1;	/* rev 1 ceiling is inclusive of the last byte */

	dma_base_and_ceiling = (u64)end << 32 | start;
	tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);

	/* defer dropping the old fb's reference to the workqueue */
	if (tilcdc_crtc->curr_fb)
		drm_flip_work_queue(&tilcdc_crtc->unref_work,
			tilcdc_crtc->curr_fb);

	tilcdc_crtc->curr_fb = fb;
}
/*
 * The driver currently only supports true color formats. For
 * true color the palette block is bypassed, but a 32 byte palette
 * should still be loaded. The first 16-bit entry must be 0x4000 while
 * all other entries must be zeroed.
 *
 * This runs the LCDC in palette-load-only mode once and waits (up to
 * 50 ms) for the palette-loaded interrupt before restoring the DMA to
 * a stopped state. The exact register sequence below is required by
 * the hardware; do not reorder.
 */
static void tilcdc_crtc_load_palette(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	int ret;

	reinit_completion(&tilcdc_crtc->palette_loaded);

	/* Tell the LCDC where the palette is located. */
	tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG,
		     tilcdc_crtc->palette_dma_handle);
	tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG,
		     (u32) tilcdc_crtc->palette_dma_handle +
		     TILCDC_PALETTE_SIZE - 1);

	/* Set dma load mode for palette loading only. */
	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
			  LCDC_PALETTE_LOAD_MODE(PALETTE_ONLY),
			  LCDC_PALETTE_LOAD_MODE_MASK);

	/* Enable DMA Palette Loaded Interrupt (rev 1 and rev 2 use
	 * different registers for interrupt enables). */
	if (priv->rev == 1)
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
	else
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_PL_INT_ENA);

	/* Enable LCDC DMA and wait for palette to be loaded. */
	tilcdc_clear_irqstatus(dev, 0xffffffff);
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

	ret = wait_for_completion_timeout(&tilcdc_crtc->palette_loaded,
					  msecs_to_jiffies(50));
	if (ret == 0)
		dev_err(dev->dev, "%s: Palette loading timeout", __func__);

	/* Disable LCDC DMA and DMA Palette Loaded Interrupt. */
	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	if (priv->rev == 1)
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
	else
		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, LCDC_V2_PL_INT_ENA);
}
/*
 * Enable the interrupts used at runtime: sync-lost, frame-done,
 * FIFO underflow and end-of-frame. Rev 1 uses enable bits in the
 * raster/DMA control registers; rev 2 has a dedicated enable register.
 */
static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;

	/* start from a clean slate - ack anything still pending */
	tilcdc_clear_irqstatus(dev, 0xffffffff);

	if (priv->rev == 1) {
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
			LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
			LCDC_V1_UNDERFLOW_INT_ENA);
		tilcdc_set(dev, LCDC_DMA_CTRL_REG,
			LCDC_V1_END_OF_FRAME_INT_ENA);
	} else {
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
			LCDC_V2_UNDERFLOW_INT_ENA |
			LCDC_V2_END_OF_FRAME0_INT_ENA |
			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}
/*
 * Counterpart of tilcdc_crtc_enable_irqs(): mask every interrupt this
 * driver may have enabled, including the palette-loaded interrupt.
 */
static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;

	/* disable irqs that we might have enabled: */
	if (priv->rev == 1) {
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
			LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
			LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
		tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
			LCDC_V1_END_OF_FRAME_INT_ENA);
	} else {
		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
			LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
			LCDC_V2_END_OF_FRAME0_INT_ENA |
			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}
/*
 * Pulse the main reset of the LCDC core. Only the rev 2 IP has a
 * software reset bit; on rev 1 this is a no-op.
 */
static void reset(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;

	if (priv->rev != 2)
		return;

	tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
	usleep_range(250, 1000);	/* let the reset propagate */
	tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
}
  182. /*
  183. * Calculate the percentage difference between the requested pixel clock rate
  184. * and the effective rate resulting from calculating the clock divider value.
  185. */
  186. static unsigned int tilcdc_pclk_diff(unsigned long rate,
  187. unsigned long real_rate)
  188. {
  189. int r = rate / 100, rr = real_rate / 100;
  190. return (unsigned int)(abs(((rr - r) * 100) / r));
  191. }
/*
 * Program the pixel clock for the current mode: request the functional
 * clock at mode.clock * divider, fall back to recomputing the divider
 * from the actual clock rate when clk_set_rate() is not supported, then
 * write the LCDC clock divisor (and, on rev 2, gate the core clocks on).
 */
static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	unsigned long clk_rate, real_rate, req_rate;
	unsigned int clkdiv;
	int ret;

	clkdiv = 2; /* first try using a standard divider of 2 */

	/* mode.clock is in KHz, set_rate wants parameter in Hz */
	req_rate = crtc->mode.clock * 1000;

	ret = clk_set_rate(priv->clk, req_rate * clkdiv);
	clk_rate = clk_get_rate(priv->clk);
	if (ret < 0) {
		/*
		 * If we fail to set the clock rate (some architectures don't
		 * use the common clock framework yet and may not implement
		 * all the clk API calls for every clock), try the next best
		 * thing: adjusting the clock divider, unless clk_get_rate()
		 * failed as well.
		 */
		if (!clk_rate) {
			/* Nothing more we can do. Just bail out. */
			dev_err(dev->dev,
				"failed to set the pixel clock - unable to read current lcdc clock rate\n");
			return;
		}

		clkdiv = DIV_ROUND_CLOSEST(clk_rate, req_rate);

		/*
		 * Emit a warning if the real clock rate resulting from the
		 * calculated divider differs much from the requested rate.
		 *
		 * 5% is an arbitrary value - LCDs are usually quite tolerant
		 * about pixel clock rates.
		 */
		real_rate = clkdiv * req_rate;
		if (tilcdc_pclk_diff(clk_rate, real_rate) > 5) {
			dev_warn(dev->dev,
				 "effective pixel clock rate (%luHz) differs from the calculated rate (%luHz)\n",
				 clk_rate, real_rate);
		}
	}

	/* remember the rate so tilcdc_crtc_update_clk() can detect changes */
	tilcdc_crtc->lcd_fck_rate = clk_rate;

	DBG("lcd_clk=%u, mode clock=%d, div=%u",
	    tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv);

	/* Configure the LCD clock divisor. */
	tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
		     LCDC_RASTER_MODE);

	/* rev 2 additionally needs its internal clock gates opened */
	if (priv->rev == 2)
		tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
			   LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
			   LCDC_V2_CORE_CLK_EN);
}
  245. uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode)
  246. {
  247. return (uint) div_u64(1000llu * mode->htotal * mode->vtotal,
  248. mode->clock);
  249. }
  250. static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
  251. {
  252. struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
  253. struct drm_device *dev = crtc->dev;
  254. struct tilcdc_drm_private *priv = dev->dev_private;
  255. const struct tilcdc_panel_info *info = tilcdc_crtc->info;
  256. uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
  257. struct drm_display_mode *mode = &crtc->state->adjusted_mode;
  258. struct drm_framebuffer *fb = crtc->primary->state->fb;
  259. if (WARN_ON(!info))
  260. return;
  261. if (WARN_ON(!fb))
  262. return;
  263. /* Configure the Burst Size and fifo threshold of DMA: */
  264. reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
  265. switch (info->dma_burst_sz) {
  266. case 1:
  267. reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1);
  268. break;
  269. case 2:
  270. reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2);
  271. break;
  272. case 4:
  273. reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4);
  274. break;
  275. case 8:
  276. reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8);
  277. break;
  278. case 16:
  279. reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
  280. break;
  281. default:
  282. dev_err(dev->dev, "invalid burst size\n");
  283. return;
  284. }
  285. reg |= (info->fifo_th << 8);
  286. tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);
  287. /* Configure timings: */
  288. hbp = mode->htotal - mode->hsync_end;
  289. hfp = mode->hsync_start - mode->hdisplay;
  290. hsw = mode->hsync_end - mode->hsync_start;
  291. vbp = mode->vtotal - mode->vsync_end;
  292. vfp = mode->vsync_start - mode->vdisplay;
  293. vsw = mode->vsync_end - mode->vsync_start;
  294. DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
  295. mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
  296. /* Set AC Bias Period and Number of Transitions per Interrupt: */
  297. reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
  298. reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
  299. LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);
  300. /*
  301. * subtract one from hfp, hbp, hsw because the hardware uses
  302. * a value of 0 as 1
  303. */
  304. if (priv->rev == 2) {
  305. /* clear bits we're going to set */
  306. reg &= ~0x78000033;
  307. reg |= ((hfp-1) & 0x300) >> 8;
  308. reg |= ((hbp-1) & 0x300) >> 4;
  309. reg |= ((hsw-1) & 0x3c0) << 21;
  310. }
  311. tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);
  312. reg = (((mode->hdisplay >> 4) - 1) << 4) |
  313. (((hbp-1) & 0xff) << 24) |
  314. (((hfp-1) & 0xff) << 16) |
  315. (((hsw-1) & 0x3f) << 10);
  316. if (priv->rev == 2)
  317. reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
  318. tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);
  319. reg = ((mode->vdisplay - 1) & 0x3ff) |
  320. ((vbp & 0xff) << 24) |
  321. ((vfp & 0xff) << 16) |
  322. (((vsw-1) & 0x3f) << 10);
  323. tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);
  324. /*
  325. * be sure to set Bit 10 for the V2 LCDC controller,
  326. * otherwise limited to 1024 pixels width, stopping
  327. * 1920x1080 being supported.
  328. */
  329. if (priv->rev == 2) {
  330. if ((mode->vdisplay - 1) & 0x400) {
  331. tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG,
  332. LCDC_LPP_B10);
  333. } else {
  334. tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG,
  335. LCDC_LPP_B10);
  336. }
  337. }
  338. /* Configure display type: */
  339. reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
  340. ~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
  341. LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK |
  342. 0x000ff000 /* Palette Loading Delay bits */);
  343. reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
  344. if (info->tft_alt_mode)
  345. reg |= LCDC_TFT_ALT_ENABLE;
  346. if (priv->rev == 2) {
  347. switch (fb->format->format) {
  348. case DRM_FORMAT_BGR565:
  349. case DRM_FORMAT_RGB565:
  350. break;
  351. case DRM_FORMAT_XBGR8888:
  352. case DRM_FORMAT_XRGB8888:
  353. reg |= LCDC_V2_TFT_24BPP_UNPACK;
  354. /* fallthrough */
  355. case DRM_FORMAT_BGR888:
  356. case DRM_FORMAT_RGB888:
  357. reg |= LCDC_V2_TFT_24BPP_MODE;
  358. break;
  359. default:
  360. dev_err(dev->dev, "invalid pixel format\n");
  361. return;
  362. }
  363. }
  364. reg |= info->fdd < 12;
  365. tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);
  366. if (info->invert_pxl_clk)
  367. tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
  368. else
  369. tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
  370. if (info->sync_ctrl)
  371. tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
  372. else
  373. tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
  374. if (info->sync_edge)
  375. tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
  376. else
  377. tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
  378. if (mode->flags & DRM_MODE_FLAG_NHSYNC)
  379. tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
  380. else
  381. tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
  382. if (mode->flags & DRM_MODE_FLAG_NVSYNC)
  383. tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
  384. else
  385. tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
  386. if (info->raster_order)
  387. tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
  388. else
  389. tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
  390. tilcdc_crtc_set_clk(crtc);
  391. tilcdc_crtc_load_palette(crtc);
  392. set_scanout(crtc, fb);
  393. drm_framebuffer_get(fb);
  394. crtc->hwmode = crtc->state->adjusted_mode;
  395. tilcdc_crtc->hvtotal_us =
  396. tilcdc_mode_hvtotal(&crtc->hwmode);
  397. }
/*
 * Power up and start the CRTC: reset the core, program the mode, enable
 * interrupts and start the raster. No-op if already enabled or if the
 * CRTC has been shut down. Serialized by enable_lock.
 */
static void tilcdc_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	unsigned long flags;

	mutex_lock(&tilcdc_crtc->enable_lock);
	if (tilcdc_crtc->enabled || tilcdc_crtc->shutdown) {
		mutex_unlock(&tilcdc_crtc->enable_lock);
		return;
	}

	pm_runtime_get_sync(dev->dev);

	reset(crtc);

	tilcdc_crtc_set_mode(crtc);

	tilcdc_crtc_enable_irqs(dev);

	tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
			  LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
			  LCDC_PALETTE_LOAD_MODE_MASK);

	/* There is no real chance for a race here as the time stamp
	 * is taken before the raster DMA is started. The spin-lock is
	 * taken to have a memory barrier after taking the time-stamp
	 * and to avoid a context switch between taking the stamp and
	 * enabling the raster.
	 */
	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
	tilcdc_crtc->last_vblank = ktime_get();
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

	drm_crtc_vblank_on(crtc);

	tilcdc_crtc->enabled = true;

	mutex_unlock(&tilcdc_crtc->enable_lock);
}
/* Atomic-helper entry point; the old state is not needed. */
static void tilcdc_crtc_atomic_enable(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_state)
{
	tilcdc_crtc_enable(crtc);
}
/*
 * Stop the CRTC: drain the raster (waiting for the frame-done irq),
 * turn off vblanks/irqs, drop runtime-PM and release any framebuffer
 * references still held. With @shutdown set the CRTC additionally
 * refuses future enables (driver unload path).
 */
static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	int ret;

	mutex_lock(&tilcdc_crtc->enable_lock);
	if (shutdown)
		tilcdc_crtc->shutdown = true;
	if (!tilcdc_crtc->enabled) {
		mutex_unlock(&tilcdc_crtc->enable_lock);
		return;
	}

	/* arm the wait before stopping the raster */
	tilcdc_crtc->frame_done = false;
	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

	/*
	 * Wait for framedone irq which will still come before putting
	 * things to sleep..
	 */
	ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
				 tilcdc_crtc->frame_done,
				 msecs_to_jiffies(500));
	if (ret == 0)
		dev_err(dev->dev, "%s: timeout waiting for framedone\n",
			__func__);

	drm_crtc_vblank_off(crtc);

	tilcdc_crtc_disable_irqs(dev);

	pm_runtime_put_sync(dev->dev);

	/* release the deferred and current scanout buffers */
	if (tilcdc_crtc->next_fb) {
		drm_flip_work_queue(&tilcdc_crtc->unref_work,
				    tilcdc_crtc->next_fb);
		tilcdc_crtc->next_fb = NULL;
	}

	if (tilcdc_crtc->curr_fb) {
		drm_flip_work_queue(&tilcdc_crtc->unref_work,
				    tilcdc_crtc->curr_fb);
		tilcdc_crtc->curr_fb = NULL;
	}

	drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);

	tilcdc_crtc->enabled = false;
	mutex_unlock(&tilcdc_crtc->enable_lock);
}
/* Disable without setting the shutdown flag (normal modeset path). */
static void tilcdc_crtc_disable(struct drm_crtc *crtc)
{
	tilcdc_crtc_off(crtc, false);
}
/* Atomic-helper entry point; the old state is not needed. */
static void tilcdc_crtc_atomic_disable(struct drm_crtc *crtc,
				       struct drm_crtc_state *old_state)
{
	tilcdc_crtc_disable(crtc);
}
/* Final disable: also latches the shutdown flag so re-enable is refused. */
void tilcdc_crtc_shutdown(struct drm_crtc *crtc)
{
	tilcdc_crtc_off(crtc, true);
}
  490. static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
  491. {
  492. return crtc->state && crtc->state->enable && crtc->state->active;
  493. }
/*
 * Workqueue handler scheduled after repeated sync-lost interrupts:
 * recover the controller with a full disable/enable cycle, under the
 * CRTC modeset lock, and only if the CRTC is actually on.
 */
static void tilcdc_crtc_recover_work(struct work_struct *work)
{
	struct tilcdc_crtc *tilcdc_crtc =
		container_of(work, struct tilcdc_crtc, recover_work);
	struct drm_crtc *crtc = &tilcdc_crtc->base;

	dev_info(crtc->dev->dev, "%s: Reset CRTC", __func__);

	drm_modeset_lock(&crtc->mutex, NULL);

	if (!tilcdc_crtc_is_on(crtc))
		goto out;

	tilcdc_crtc_disable(crtc);
	tilcdc_crtc_enable(crtc);
out:
	drm_modeset_unlock(&crtc->mutex);
}
/*
 * drm_crtc_funcs.destroy: shut the CRTC down, flush pending deferred
 * work, then release DRM/OF resources.
 */
static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct tilcdc_drm_private *priv = crtc->dev->dev_private;

	tilcdc_crtc_shutdown(crtc);

	/* make sure queued unref work has run before tearing down */
	flush_workqueue(priv->wq);

	of_node_put(crtc->port);
	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
}
/*
 * Queue a page flip to @fb, sending @event on completion.
 *
 * If the next vblank is closer than TILCDC_VBLANK_SAFETY_THRESHOLD_US,
 * programming the scanout now could race with the LCDC fetching the DMA
 * addresses, so the fb is parked in next_fb and programmed from the
 * end-of-frame interrupt instead.
 *
 * Returns 0 on success, -EBUSY if a flip is already pending.
 */
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_pending_vblank_event *event)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;

	if (tilcdc_crtc->event) {
		dev_err(dev->dev, "already pending page flip!\n");
		return -EBUSY;
	}

	/* hold a reference until the fb is retired from scanout */
	drm_framebuffer_get(fb);

	crtc->primary->fb = fb;
	tilcdc_crtc->event = event;

	mutex_lock(&tilcdc_crtc->enable_lock);

	if (tilcdc_crtc->enabled) {
		unsigned long flags;
		ktime_t next_vblank;
		s64 tdiff;

		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

		/* estimate when the next vblank starts */
		next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
					   tilcdc_crtc->hvtotal_us);
		tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));

		if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
			tilcdc_crtc->next_fb = fb;	/* defer to EOF irq */
		else
			set_scanout(crtc, fb);

		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
	}

	mutex_unlock(&tilcdc_crtc->enable_lock);

	return 0;
}
  549. static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
  550. const struct drm_display_mode *mode,
  551. struct drm_display_mode *adjusted_mode)
  552. {
  553. struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
  554. if (!tilcdc_crtc->simulate_vesa_sync)
  555. return true;
  556. /*
  557. * tilcdc does not generate VESA-compliant sync but aligns
  558. * VS on the second edge of HS instead of first edge.
  559. * We use adjusted_mode, to fixup sync by aligning both rising
  560. * edges and add HSKEW offset to fix the sync.
  561. */
  562. adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
  563. adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
  564. if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
  565. adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
  566. adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
  567. } else {
  568. adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
  569. adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
  570. }
  571. return true;
  572. }
/*
 * Atomic check: an active CRTC must have its primary plane present in
 * the same atomic state and attached to this CRTC, and the requested
 * mode must pass tilcdc_crtc_mode_valid().
 */
static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
				    struct drm_crtc_state *state)
{
	struct drm_display_mode *mode = &state->mode;
	int ret;

	/* If we are not active we don't care */
	if (!state->active)
		return 0;

	/* planes[0] is this driver's only (primary) plane */
	if (state->state->planes[0].ptr != crtc->primary ||
	    state->state->planes[0].state == NULL ||
	    state->state->planes[0].state->crtc != crtc) {
		dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
		return -EINVAL;
	}

	ret = tilcdc_crtc_mode_valid(crtc, mode);
	if (ret) {
		dev_dbg(crtc->dev->dev, "Mode \"%s\" not valid", mode->name);
		return -EINVAL;
	}

	return 0;
}
/*
 * The end-of-frame interrupt (the vblank source, see tilcdc_crtc_irq)
 * is enabled for the whole time the CRTC is on, so nothing to do here.
 */
static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc)
{
	return 0;
}
/* See tilcdc_crtc_enable_vblank(): vblank irqs stay on; nothing to do. */
static void tilcdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
}
/*
 * drm_crtc_funcs.reset: reset the software state and, if the raster was
 * left running (e.g. by the bootloader), drain it cleanly by waiting
 * for the frame-done interrupt before powering down.
 */
static void tilcdc_crtc_reset(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	int ret;

	drm_atomic_helper_crtc_reset(crtc);

	/* Turn the raster off if it for some reason is on. */
	pm_runtime_get_sync(dev->dev);
	if (tilcdc_read(dev, LCDC_RASTER_CTRL_REG) & LCDC_RASTER_ENABLE) {
		/* Enable DMA Frame Done Interrupt */
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_FRAME_DONE);
		tilcdc_clear_irqstatus(dev, 0xffffffff);

		/* arm the wait before stopping the raster */
		tilcdc_crtc->frame_done = false;
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

		ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
					 tilcdc_crtc->frame_done,
					 msecs_to_jiffies(500));
		if (ret == 0)
			dev_err(dev->dev, "%s: timeout waiting for framedone\n",
				__func__);
	}
	pm_runtime_put_sync(dev->dev);
}
/* DRM core CRTC callbacks; atomic helpers fill the standard paths. */
static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
	.destroy        = tilcdc_crtc_destroy,
	.set_config     = drm_atomic_helper_set_config,
	.page_flip      = drm_atomic_helper_page_flip,
	.reset		= tilcdc_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank	= tilcdc_crtc_enable_vblank,
	.disable_vblank	= tilcdc_crtc_disable_vblank,
};
/* Atomic modeset helper callbacks for this CRTC. */
static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
	.mode_fixup	= tilcdc_crtc_mode_fixup,
	.atomic_check	= tilcdc_crtc_atomic_check,
	.atomic_enable	= tilcdc_crtc_atomic_enable,
	.atomic_disable	= tilcdc_crtc_atomic_disable,
};
  640. int tilcdc_crtc_max_width(struct drm_crtc *crtc)
  641. {
  642. struct drm_device *dev = crtc->dev;
  643. struct tilcdc_drm_private *priv = dev->dev_private;
  644. int max_width = 0;
  645. if (priv->rev == 1)
  646. max_width = 1024;
  647. else if (priv->rev == 2)
  648. max_width = 2048;
  649. return max_width;
  650. }
/*
 * Validate @mode against the LCDC hardware limits: maximum resolution,
 * the bit widths of the raster timing register fields, the DT-configured
 * pixel clock / width / bandwidth caps. Returns MODE_OK or a specific
 * rejection reason.
 */
int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct tilcdc_drm_private *priv = crtc->dev->dev_private;
	unsigned int bandwidth;
	uint32_t hbp, hfp, hsw, vbp, vfp, vsw;

	/*
	 * check to see if the width is within the range that
	 * the LCD Controller physically supports
	 */
	if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
		return MODE_VIRTUAL_X;

	/* width must be multiple of 16 */
	if (mode->hdisplay & 0xf)
		return MODE_VIRTUAL_X;

	if (mode->vdisplay > 2048)
		return MODE_VIRTUAL_Y;

	DBG("Processing mode %dx%d@%d with pixel clock %d",
	    mode->hdisplay, mode->vdisplay,
	    drm_mode_vrefresh(mode), mode->clock);

	hbp = mode->htotal - mode->hsync_end;
	hfp = mode->hsync_start - mode->hdisplay;
	hsw = mode->hsync_end - mode->hsync_start;
	vbp = mode->vtotal - mode->vsync_end;
	vfp = mode->vsync_start - mode->vdisplay;
	vsw = mode->vsync_end - mode->vsync_start;

	/* horizontal porch/sync fields are 10 bits wide (stored minus one) */
	if ((hbp-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Back Porch out of range");
		return MODE_HBLANK_WIDE;
	}

	if ((hfp-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Front Porch out of range");
		return MODE_HBLANK_WIDE;
	}

	if ((hsw-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Sync Width out of range");
		return MODE_HSYNC_WIDE;
	}

	/* vertical porches are 8 bits, vsw is 6 bits (stored minus one) */
	if (vbp & ~0xff) {
		DBG("Pruning mode: Vertical Back Porch out of range");
		return MODE_VBLANK_WIDE;
	}

	if (vfp & ~0xff) {
		DBG("Pruning mode: Vertical Front Porch out of range");
		return MODE_VBLANK_WIDE;
	}

	if ((vsw-1) & ~0x3f) {
		DBG("Pruning mode: Vertical Sync Width out of range");
		return MODE_VSYNC_WIDE;
	}

	/*
	 * some devices have a maximum allowed pixel clock
	 * configured from the DT
	 */
	if (mode->clock > priv->max_pixelclock) {
		DBG("Pruning mode: pixel clock too high");
		return MODE_CLOCK_HIGH;
	}

	/*
	 * some devices further limit the max horizontal resolution
	 * configured from the DT
	 */
	if (mode->hdisplay > priv->max_width)
		return MODE_BAD_WIDTH;

	/* filter out modes that would require too much memory bandwidth: */
	bandwidth = mode->hdisplay * mode->vdisplay *
		drm_mode_vrefresh(mode);
	if (bandwidth > priv->max_bandwidth) {
		DBG("Pruning mode: exceeds defined bandwidth limit");
		return MODE_BAD;
	}

	return MODE_OK;
}
  723. void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
  724. const struct tilcdc_panel_info *info)
  725. {
  726. struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
  727. tilcdc_crtc->info = info;
  728. }
  729. void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
  730. bool simulate_vesa_sync)
  731. {
  732. struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
  733. tilcdc_crtc->simulate_vesa_sync = simulate_vesa_sync;
  734. }
/*
 * React to a change of the LCD functional clock rate (e.g. after a
 * cpufreq transition): if the cached rate no longer matches the clock,
 * restart the CRTC so the divider is recomputed for the new rate.
 */
void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	drm_modeset_lock(&crtc->mutex, NULL);
	if (tilcdc_crtc->lcd_fck_rate != clk_get_rate(priv->clk)) {
		if (tilcdc_crtc_is_on(crtc)) {
			pm_runtime_get_sync(dev->dev);
			tilcdc_crtc_disable(crtc);

			/* tilcdc_crtc_set_clk() refreshes lcd_fck_rate */
			tilcdc_crtc_set_clk(crtc);

			tilcdc_crtc_enable(crtc);
			pm_runtime_put_sync(dev->dev);
		}
	}
	drm_modeset_unlock(&crtc->mutex);
}
/* Consecutive sync-lost IRQs tolerated before triggering recovery (rev 2). */
#define SYNC_LOST_COUNT_LIMIT 50

/*
 * LCDC interrupt handler.  Reads and acknowledges the raw status, then
 * services each condition that is set: end-of-frame (flip completion and
 * vblank delivery), FIFO underflow, palette-load completion, sync-lost
 * recovery, and frame-done.  Register layout differs between LCDC
 * revision 1 and revision 2, hence the priv->rev branches throughout.
 */
irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	uint32_t stat, reg;

	stat = tilcdc_read_irqstatus(dev);
	tilcdc_clear_irqstatus(dev, stat);

	if (stat & LCDC_END_OF_FRAME0) {
		unsigned long flags;
		bool skip_event = false;
		ktime_t now;

		now = ktime_get();

		/* Release framebuffers queued for unref in worker context. */
		drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);

		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

		tilcdc_crtc->last_vblank = now;

		/*
		 * A pending flip is latched here: program the new scanout
		 * address and suppress the vblank event below, since the
		 * event for this flip is sent on a later frame.
		 */
		if (tilcdc_crtc->next_fb) {
			set_scanout(crtc, tilcdc_crtc->next_fb);
			tilcdc_crtc->next_fb = NULL;
			skip_event = true;
		}

		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

		drm_crtc_handle_vblank(crtc);

		if (!skip_event) {
			struct drm_pending_vblank_event *event;

			spin_lock_irqsave(&dev->event_lock, flags);

			event = tilcdc_crtc->event;
			tilcdc_crtc->event = NULL;
			if (event)
				drm_crtc_send_vblank_event(crtc, event);

			spin_unlock_irqrestore(&dev->event_lock, flags);
		}

		/* A clean frame resets the sync-lost flood counter. */
		if (tilcdc_crtc->frame_intact)
			tilcdc_crtc->sync_lost_count = 0;
		else
			tilcdc_crtc->frame_intact = true;
	}

	if (stat & LCDC_FIFO_UNDERFLOW)
		dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow",
				    __func__, stat);

	if (stat & LCDC_PL_LOAD_DONE) {
		/* Palette DMA finished; disable further palette-load IRQs. */
		complete(&tilcdc_crtc->palette_loaded);
		if (priv->rev == 1)
			tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
				     LCDC_V1_PL_INT_ENA);
		else
			tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
				     LCDC_V2_PL_INT_ENA);
	}

	if (stat & LCDC_SYNC_LOST) {
		dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
				    __func__, stat);
		tilcdc_crtc->frame_intact = false;
		if (priv->rev == 1) {
			/* Rev 1: bounce the raster enable bit to resync. */
			reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);
			if (reg & LCDC_RASTER_ENABLE) {
				tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
					     LCDC_RASTER_ENABLE);
				tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
					   LCDC_RASTER_ENABLE);
			}
		} else {
			/*
			 * Rev 2: on a flood of sync-lost IRQs, mask the IRQ
			 * and defer a full recovery to process context.
			 */
			if (tilcdc_crtc->sync_lost_count++ >
			    SYNC_LOST_COUNT_LIMIT) {
				dev_err(dev->dev,
					"%s(0x%08x): Sync lost flood detected, recovering",
					__func__, stat);
				queue_work(system_wq,
					   &tilcdc_crtc->recover_work);
				tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
					     LCDC_SYNC_LOST);
				tilcdc_crtc->sync_lost_count = 0;
			}
		}
	}

	if (stat & LCDC_FRAME_DONE) {
		tilcdc_crtc->frame_done = true;
		wake_up(&tilcdc_crtc->frame_done_wq);
		/* rev 1 lcdc appears to hang if irq is not disabled here */
		if (priv->rev == 1)
			tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
				     LCDC_V1_FRAME_DONE_INT_ENA);
	}

	/* For revision 2 only */
	if (priv->rev == 2) {
		/* Indicate to LCDC that the interrupt service routine has
		 * completed, see 13.3.6.1.6 in AM335x TRM.
		 */
		tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
	}

	return IRQ_HANDLED;
}
  845. int tilcdc_crtc_create(struct drm_device *dev)
  846. {
  847. struct tilcdc_drm_private *priv = dev->dev_private;
  848. struct tilcdc_crtc *tilcdc_crtc;
  849. struct drm_crtc *crtc;
  850. int ret;
  851. tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL);
  852. if (!tilcdc_crtc) {
  853. dev_err(dev->dev, "allocation failed\n");
  854. return -ENOMEM;
  855. }
  856. init_completion(&tilcdc_crtc->palette_loaded);
  857. tilcdc_crtc->palette_base = dmam_alloc_coherent(dev->dev,
  858. TILCDC_PALETTE_SIZE,
  859. &tilcdc_crtc->palette_dma_handle,
  860. GFP_KERNEL | __GFP_ZERO);
  861. if (!tilcdc_crtc->palette_base)
  862. return -ENOMEM;
  863. *tilcdc_crtc->palette_base = TILCDC_PALETTE_FIRST_ENTRY;
  864. crtc = &tilcdc_crtc->base;
  865. ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
  866. if (ret < 0)
  867. goto fail;
  868. mutex_init(&tilcdc_crtc->enable_lock);
  869. init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
  870. drm_flip_work_init(&tilcdc_crtc->unref_work,
  871. "unref", unref_worker);
  872. spin_lock_init(&tilcdc_crtc->irq_lock);
  873. INIT_WORK(&tilcdc_crtc->recover_work, tilcdc_crtc_recover_work);
  874. ret = drm_crtc_init_with_planes(dev, crtc,
  875. &tilcdc_crtc->primary,
  876. NULL,
  877. &tilcdc_crtc_funcs,
  878. "tilcdc crtc");
  879. if (ret < 0)
  880. goto fail;
  881. drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);
  882. if (priv->is_componentized) {
  883. crtc->port = of_graph_get_port_by_id(dev->dev->of_node, 0);
  884. if (!crtc->port) { /* This should never happen */
  885. dev_err(dev->dev, "Port node not found in %pOF\n",
  886. dev->dev->of_node);
  887. ret = -EINVAL;
  888. goto fail;
  889. }
  890. }
  891. priv->crtc = crtc;
  892. return 0;
  893. fail:
  894. tilcdc_crtc_destroy(crtc);
  895. return ret;
  896. }