tilcdc_crtc.c

/*
 * Copyright (C) 2012 Texas Instruments
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/of_graph.h>

#include "tilcdc_drv.h"
#include "tilcdc_regs.h"

#define TILCDC_VBLANK_SAFETY_THRESHOLD_US	1000
#define TILCDC_PALETTE_SIZE			32
#define TILCDC_PALETTE_FIRST_ENTRY		0x4000

struct tilcdc_crtc {
        struct drm_crtc base;

        struct drm_plane primary;
        const struct tilcdc_panel_info *info;
        struct drm_pending_vblank_event *event;
        struct mutex enable_lock;
        bool enabled;
        bool shutdown;
        wait_queue_head_t frame_done_wq;
        bool frame_done;
        spinlock_t irq_lock;

        unsigned int lcd_fck_rate;

        ktime_t last_vblank;

        struct drm_framebuffer *curr_fb;
        struct drm_framebuffer *next_fb;

        /* for deferred fb unref's: */
        struct drm_flip_work unref_work;

        /* Only set if an external encoder is connected */
        bool simulate_vesa_sync;

        int sync_lost_count;
        bool frame_intact;
        struct work_struct recover_work;

        dma_addr_t palette_dma_handle;
        u16 *palette_base;
        struct completion palette_loaded;
};

#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)

static void unref_worker(struct drm_flip_work *work, void *val)
{
        struct tilcdc_crtc *tilcdc_crtc =
                container_of(work, struct tilcdc_crtc, unref_work);
        struct drm_device *dev = tilcdc_crtc->base.dev;

        mutex_lock(&dev->mode_config.mutex);
        drm_framebuffer_unreference(val);
        mutex_unlock(&dev->mode_config.mutex);
}

static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct tilcdc_drm_private *priv = dev->dev_private;
        struct drm_gem_cma_object *gem;
        dma_addr_t start, end;
        u64 dma_base_and_ceiling;

        gem = drm_fb_cma_get_gem_obj(fb, 0);

        start = gem->paddr + fb->offsets[0] +
                crtc->y * fb->pitches[0] +
                crtc->x * fb->format->cpp[0];

        end = start + (crtc->mode.vdisplay * fb->pitches[0]);

        /* Write LCDC_DMA_FB_BASE_ADDR_0_REG and LCDC_DMA_FB_CEILING_ADDR_0_REG
         * with a single instruction, if available. This should make it more
         * unlikely that LCDC would fetch the DMA addresses in the middle of
         * an update.
         */
        if (priv->rev == 1)
                end -= 1;

        dma_base_and_ceiling = (u64)end << 32 | start;
        tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);

        if (tilcdc_crtc->curr_fb)
                drm_flip_work_queue(&tilcdc_crtc->unref_work,
                                    tilcdc_crtc->curr_fb);

        tilcdc_crtc->curr_fb = fb;
}

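/*
 * Note on the 64-bit write above: the ceiling address occupies the upper
 * 32 bits and the base address the lower 32 bits, matching the adjacent
 * LCDC_DMA_FB_BASE_ADDR_0_REG / LCDC_DMA_FB_CEILING_ADDR_0_REG pair.
 * Illustrative example (hypothetical addresses): with start = 0x9e000000
 * and a 1280x720 XRGB8888 scanout (pitch 5120), end = 0x9e000000 +
 * 720 * 5120 = 0x9e384000, so the combined value written is
 * 0x9e3840009e000000 (rev 1 subtracts one from the ceiling first).
 */
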
/*
 * The driver currently only supports true color formats. For true color
 * the palette block is bypassed, but a 32 byte palette should still be
 * loaded. The first 16-bit entry must be 0x4000 while all other entries
 * must be zeroed.
 */
static void tilcdc_crtc_load_palette(struct drm_crtc *crtc)
{
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct tilcdc_drm_private *priv = dev->dev_private;
        int ret;

        reinit_completion(&tilcdc_crtc->palette_loaded);

        /* Tell the LCDC where the palette is located. */
        tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG,
                     tilcdc_crtc->palette_dma_handle);
        tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG,
                     (u32) tilcdc_crtc->palette_dma_handle +
                     TILCDC_PALETTE_SIZE - 1);

        /* Set dma load mode for palette loading only. */
        tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
                          LCDC_PALETTE_LOAD_MODE(PALETTE_ONLY),
                          LCDC_PALETTE_LOAD_MODE_MASK);

        /* Enable DMA Palette Loaded Interrupt */
        if (priv->rev == 1)
                tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
        else
                tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_PL_INT_ENA);

        /* Enable LCDC DMA and wait for palette to be loaded. */
        tilcdc_clear_irqstatus(dev, 0xffffffff);
        tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

        ret = wait_for_completion_timeout(&tilcdc_crtc->palette_loaded,
                                          msecs_to_jiffies(50));
        if (ret == 0)
                dev_err(dev->dev, "%s: Palette loading timeout", __func__);

        /* Disable LCDC DMA and DMA Palette Loaded Interrupt. */
        tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
        if (priv->rev == 1)
                tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
        else
                tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, LCDC_V2_PL_INT_ENA);
}

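/*
 * The palette buffer referenced above is set up in tilcdc_crtc_create():
 * 32 bytes of coherent DMA memory, zeroed on allocation, with only the
 * first 16-bit entry set to TILCDC_PALETTE_FIRST_ENTRY (0x4000). That is
 * exactly the bypass pattern described in the comment above, so no
 * per-mode palette contents ever need to be computed.
 */
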
static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
{
        struct tilcdc_drm_private *priv = dev->dev_private;

        tilcdc_clear_irqstatus(dev, 0xffffffff);

        if (priv->rev == 1) {
                tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
                           LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
                           LCDC_V1_UNDERFLOW_INT_ENA);
                tilcdc_set(dev, LCDC_DMA_CTRL_REG,
                           LCDC_V1_END_OF_FRAME_INT_ENA);
        } else {
                tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
                             LCDC_V2_UNDERFLOW_INT_ENA |
                             LCDC_V2_END_OF_FRAME0_INT_ENA |
                             LCDC_FRAME_DONE | LCDC_SYNC_LOST);
        }
}

static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
{
        struct tilcdc_drm_private *priv = dev->dev_private;

        /* disable irqs that we might have enabled: */
        if (priv->rev == 1) {
                tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
                             LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
                             LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
                tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
                             LCDC_V1_END_OF_FRAME_INT_ENA);
        } else {
                tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
                             LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
                             LCDC_V2_END_OF_FRAME0_INT_ENA |
                             LCDC_FRAME_DONE | LCDC_SYNC_LOST);
        }
}

static void reset(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct tilcdc_drm_private *priv = dev->dev_private;

        if (priv->rev != 2)
                return;

        tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
        usleep_range(250, 1000);
        tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
}

/*
 * Calculate the percentage difference between the requested pixel clock rate
 * and the effective rate resulting from calculating the clock divider value.
 */
static unsigned int tilcdc_pclk_diff(unsigned long rate,
                                     unsigned long real_rate)
{
        int r = rate / 100, rr = real_rate / 100;

        return (unsigned int)(abs(((rr - r) * 100) / r));
}

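/*
 * Worked example (illustrative numbers): for a requested rate of
 * 74,250,000 Hz and an effective rate of 72,000,000 Hz, r = 742500 and
 * rr = 720000, so the function returns |(720000 - 742500) * 100 / 742500|
 * = 3, i.e. roughly a 3% deviation. Dividing by 100 up front also keeps
 * the intermediate product comfortably within int range for typical
 * pixel clock rates.
 */
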
static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct tilcdc_drm_private *priv = dev->dev_private;
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        unsigned long clk_rate, real_rate, req_rate;
        unsigned int clkdiv;
        int ret;

        clkdiv = 2; /* first try using a standard divider of 2 */

        /* mode.clock is in kHz, set_rate wants parameter in Hz */
        req_rate = crtc->mode.clock * 1000;

        ret = clk_set_rate(priv->clk, req_rate * clkdiv);
        clk_rate = clk_get_rate(priv->clk);
        if (ret < 0) {
                /*
                 * If we fail to set the clock rate (some architectures don't
                 * use the common clock framework yet and may not implement
                 * all the clk API calls for every clock), try the next best
                 * thing: adjusting the clock divider, unless clk_get_rate()
                 * failed as well.
                 */
                if (!clk_rate) {
                        /* Nothing more we can do. Just bail out. */
                        dev_err(dev->dev,
                                "failed to set the pixel clock - unable to read current lcdc clock rate\n");
                        return;
                }

                clkdiv = DIV_ROUND_CLOSEST(clk_rate, req_rate);

                /*
                 * Emit a warning if the real clock rate resulting from the
                 * calculated divider differs much from the requested rate.
                 *
                 * 5% is an arbitrary value - LCDs are usually quite tolerant
                 * about pixel clock rates.
                 */
                real_rate = clkdiv * req_rate;

                if (tilcdc_pclk_diff(clk_rate, real_rate) > 5) {
                        dev_warn(dev->dev,
                                 "effective pixel clock rate (%luHz) differs from the calculated rate (%luHz)\n",
                                 clk_rate, real_rate);
                }
        }

        tilcdc_crtc->lcd_fck_rate = clk_rate;

        DBG("lcd_clk=%u, mode clock=%d, div=%u",
            tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv);

        /* Configure the LCD clock divisor. */
        tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
                     LCDC_RASTER_MODE);

        if (priv->rev == 2)
                tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
                           LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
                           LCDC_V2_CORE_CLK_EN);
}

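/*
 * Worked example for the divider fallback above (illustrative numbers):
 * with a 31,500 kHz mode clock, req_rate is 31.5 MHz and the driver first
 * asks for 63 MHz with clkdiv = 2. If clk_set_rate() fails but the
 * functional clock reads back as a fixed 126 MHz, the fallback computes
 * clkdiv = DIV_ROUND_CLOSEST(126000000, 31500000) = 4, giving an effective
 * pixel clock of 126 MHz / 4 = 31.5 MHz; real_rate (clkdiv * req_rate)
 * then matches clk_rate exactly, so no deviation warning is printed.
 */
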
static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
{
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct tilcdc_drm_private *priv = dev->dev_private;
        const struct tilcdc_panel_info *info = tilcdc_crtc->info;
        uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
        struct drm_display_mode *mode = &crtc->state->adjusted_mode;
        struct drm_framebuffer *fb = crtc->primary->state->fb;

        if (WARN_ON(!info))
                return;

        if (WARN_ON(!fb))
                return;

        /* Configure the Burst Size and fifo threshold of DMA: */
        reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
        switch (info->dma_burst_sz) {
        case 1:
                reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1);
                break;
        case 2:
                reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2);
                break;
        case 4:
                reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4);
                break;
        case 8:
                reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8);
                break;
        case 16:
                reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
                break;
        default:
                dev_err(dev->dev, "invalid burst size\n");
                return;
        }
        reg |= (info->fifo_th << 8);
        tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);

        /* Configure timings: */
        hbp = mode->htotal - mode->hsync_end;
        hfp = mode->hsync_start - mode->hdisplay;
        hsw = mode->hsync_end - mode->hsync_start;
        vbp = mode->vtotal - mode->vsync_end;
        vfp = mode->vsync_start - mode->vdisplay;
        vsw = mode->vsync_end - mode->vsync_start;

        DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
            mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);

        /* Set AC Bias Period and Number of Transitions per Interrupt: */
        reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
        reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
                LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);

        /*
         * subtract one from hfp, hbp, hsw because the hardware uses
         * a value of 0 as 1
         */
        if (priv->rev == 2) {
                /* clear bits we're going to set */
                reg &= ~0x78000033;
                reg |= ((hfp-1) & 0x300) >> 8;
                reg |= ((hbp-1) & 0x300) >> 4;
                reg |= ((hsw-1) & 0x3c0) << 21;
        }
        tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);

        reg = (((mode->hdisplay >> 4) - 1) << 4) |
                (((hbp-1) & 0xff) << 24) |
                (((hfp-1) & 0xff) << 16) |
                (((hsw-1) & 0x3f) << 10);
        if (priv->rev == 2)
                reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
        tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);

        reg = ((mode->vdisplay - 1) & 0x3ff) |
                ((vbp & 0xff) << 24) |
                ((vfp & 0xff) << 16) |
                (((vsw-1) & 0x3f) << 10);
        tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);

        /*
         * be sure to set Bit 10 for the V2 LCDC controller,
         * otherwise the height is limited to 1024 lines, which
         * stops 1920x1080 from being supported.
         */
        if (priv->rev == 2) {
                if ((mode->vdisplay - 1) & 0x400) {
                        tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG,
                                   LCDC_LPP_B10);
                } else {
                        tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG,
                                     LCDC_LPP_B10);
                }
        }

        /* Configure display type: */
        reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
                ~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
                  LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK |
                  0x000ff000 /* Palette Loading Delay bits */);
        reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
        if (info->tft_alt_mode)
                reg |= LCDC_TFT_ALT_ENABLE;

        if (priv->rev == 2) {
                switch (fb->format->format) {
                case DRM_FORMAT_BGR565:
                case DRM_FORMAT_RGB565:
                        break;

                case DRM_FORMAT_XBGR8888:
                case DRM_FORMAT_XRGB8888:
                        reg |= LCDC_V2_TFT_24BPP_UNPACK;
                        /* fallthrough */
                case DRM_FORMAT_BGR888:
                case DRM_FORMAT_RGB888:
                        reg |= LCDC_V2_TFT_24BPP_MODE;
                        break;

                default:
                        dev_err(dev->dev, "invalid pixel format\n");
                        return;
                }
        }
        reg |= info->fdd << 12;
        tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);

        if (info->invert_pxl_clk)
                tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
        else
                tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);

        if (info->sync_ctrl)
                tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
        else
                tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);

        if (info->sync_edge)
                tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
        else
                tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);

        if (mode->flags & DRM_MODE_FLAG_NHSYNC)
                tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
        else
                tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);

        if (mode->flags & DRM_MODE_FLAG_NVSYNC)
                tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
        else
                tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);

        if (info->raster_order)
                tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
        else
                tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);

        tilcdc_crtc_set_clk(crtc);

        tilcdc_crtc_load_palette(crtc);

        set_scanout(crtc, fb);

        drm_framebuffer_reference(fb);

        crtc->hwmode = crtc->state->adjusted_mode;
}

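/*
 * Note on the timing register packing above: RASTER_TIMING_0/1 carry the
 * low-order bits of the horizontal/vertical porch and sync-width values
 * (8 bits for hbp/hfp, 6 bits for hsw and vsw), while on rev 2 the extra
 * most-significant bits are folded into RASTER_TIMING_2 via the
 * ((hfp - 1) & 0x300) >> 8 style terms, extending the programmable range
 * beyond what the original fields allow.
 */
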
static void tilcdc_crtc_enable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        unsigned long flags;

        WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
        mutex_lock(&tilcdc_crtc->enable_lock);
        if (tilcdc_crtc->enabled || tilcdc_crtc->shutdown) {
                mutex_unlock(&tilcdc_crtc->enable_lock);
                return;
        }

        pm_runtime_get_sync(dev->dev);

        reset(crtc);

        tilcdc_crtc_set_mode(crtc);

        tilcdc_crtc_enable_irqs(dev);

        tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
        tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
                          LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
                          LCDC_PALETTE_LOAD_MODE_MASK);

        /* There is no real chance for a race here as the time stamp
         * is taken before the raster DMA is started. The spin-lock is
         * taken to have a memory barrier after taking the time-stamp
         * and to avoid a context switch between taking the stamp and
         * enabling the raster.
         */
        spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
        tilcdc_crtc->last_vblank = ktime_get();
        tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
        spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

        drm_crtc_vblank_on(crtc);

        tilcdc_crtc->enabled = true;

        mutex_unlock(&tilcdc_crtc->enable_lock);
}

static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
{
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct tilcdc_drm_private *priv = dev->dev_private;
        int ret;

        mutex_lock(&tilcdc_crtc->enable_lock);
        if (shutdown)
                tilcdc_crtc->shutdown = true;
        if (!tilcdc_crtc->enabled) {
                mutex_unlock(&tilcdc_crtc->enable_lock);
                return;
        }

        tilcdc_crtc->frame_done = false;
        tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

        /*
         * Wait for framedone irq which will still come before putting
         * things to sleep..
         */
        ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
                                 tilcdc_crtc->frame_done,
                                 msecs_to_jiffies(500));
        if (ret == 0)
                dev_err(dev->dev, "%s: timeout waiting for framedone\n",
                        __func__);

        drm_crtc_vblank_off(crtc);

        tilcdc_crtc_disable_irqs(dev);

        pm_runtime_put_sync(dev->dev);

        if (tilcdc_crtc->next_fb) {
                drm_flip_work_queue(&tilcdc_crtc->unref_work,
                                    tilcdc_crtc->next_fb);
                tilcdc_crtc->next_fb = NULL;
        }

        if (tilcdc_crtc->curr_fb) {
                drm_flip_work_queue(&tilcdc_crtc->unref_work,
                                    tilcdc_crtc->curr_fb);
                tilcdc_crtc->curr_fb = NULL;
        }

        drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);

        tilcdc_crtc->enabled = false;
        mutex_unlock(&tilcdc_crtc->enable_lock);
}

static void tilcdc_crtc_disable(struct drm_crtc *crtc)
{
        WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
        tilcdc_crtc_off(crtc, false);
}

void tilcdc_crtc_shutdown(struct drm_crtc *crtc)
{
        tilcdc_crtc_off(crtc, true);
}

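/*
 * tilcdc_crtc_off() with shutdown == true latches the shutdown flag under
 * enable_lock, so a later tilcdc_crtc_enable() call bails out early and
 * the controller stays off once the driver is shutting down.
 */
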
static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
{
        return crtc->state && crtc->state->enable && crtc->state->active;
}

static void tilcdc_crtc_recover_work(struct work_struct *work)
{
        struct tilcdc_crtc *tilcdc_crtc =
                container_of(work, struct tilcdc_crtc, recover_work);
        struct drm_crtc *crtc = &tilcdc_crtc->base;

        dev_info(crtc->dev->dev, "%s: Reset CRTC", __func__);

        drm_modeset_lock(&crtc->mutex, NULL);

        if (!tilcdc_crtc_is_on(crtc))
                goto out;

        tilcdc_crtc_disable(crtc);
        tilcdc_crtc_enable(crtc);
out:
        drm_modeset_unlock(&crtc->mutex);
}

static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
{
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct tilcdc_drm_private *priv = crtc->dev->dev_private;

        drm_modeset_lock(&crtc->mutex, NULL);
        tilcdc_crtc_disable(crtc);
        drm_modeset_unlock(&crtc->mutex);

        flush_workqueue(priv->wq);

        of_node_put(crtc->port);
        drm_crtc_cleanup(crtc);
        drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
}

int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
                          struct drm_framebuffer *fb,
                          struct drm_pending_vblank_event *event)
{
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct drm_device *dev = crtc->dev;

        WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

        if (tilcdc_crtc->event) {
                dev_err(dev->dev, "already pending page flip!\n");
                return -EBUSY;
        }

        drm_framebuffer_reference(fb);

        crtc->primary->fb = fb;
        tilcdc_crtc->event = event;

        mutex_lock(&tilcdc_crtc->enable_lock);

        if (tilcdc_crtc->enabled) {
                unsigned long flags;
                ktime_t next_vblank;
                s64 tdiff;

                spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

                next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
                                           1000000 / crtc->hwmode.vrefresh);
                tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));

                if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
                        tilcdc_crtc->next_fb = fb;
                else
                        set_scanout(crtc, fb);

                spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
        }

        mutex_unlock(&tilcdc_crtc->enable_lock);

        return 0;
}

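/*
 * Worked example for the safety threshold above (illustrative numbers):
 * with a 60 Hz mode the estimated frame period is 1000000 / 60 = 16666 us.
 * If the flip request arrives 16 ms after the last recorded vblank, only
 * about 0.6 ms remain before the next one, which is below
 * TILCDC_VBLANK_SAFETY_THRESHOLD_US (1000 us), so the new framebuffer is
 * parked in next_fb and programmed from the end-of-frame interrupt instead
 * of racing the hardware with an immediate set_scanout().
 */
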
static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
                                   const struct drm_display_mode *mode,
                                   struct drm_display_mode *adjusted_mode)
{
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

        if (!tilcdc_crtc->simulate_vesa_sync)
                return true;

        /*
         * tilcdc does not generate VESA-compliant sync but aligns
         * VS on the second edge of HS instead of the first edge.
         * We use adjusted_mode to fix up the sync by aligning both
         * rising edges and adding the HSKEW offset.
         */
        adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
        adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;

        if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
                adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
                adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
        } else {
                adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
                adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
        }

        return true;
}

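/*
 * Example of the adjustment above (illustrative numbers): a mode with
 * DRM_MODE_FLAG_NHSYNC and a 44-pixel sync pulse (hsync_end - hsync_start)
 * leaves mode_fixup with PHSYNC set, NHSYNC cleared, hskew == 44 and
 * DRM_MODE_FLAG_HSKEW set. Note that simulate_vesa_sync is only set when
 * an external encoder is connected.
 */
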
static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
                                    struct drm_crtc_state *state)
{
        struct drm_display_mode *mode = &state->mode;
        int ret;

        /* If we are not active we don't care */
        if (!state->active)
                return 0;

        if (state->state->planes[0].ptr != crtc->primary ||
            state->state->planes[0].state == NULL ||
            state->state->planes[0].state->crtc != crtc) {
                dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
                return -EINVAL;
        }

        ret = tilcdc_crtc_mode_valid(crtc, mode);
        if (ret) {
                dev_dbg(crtc->dev->dev, "Mode \"%s\" not valid", mode->name);
                return -EINVAL;
        }

        return 0;
}

static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc)
{
        return 0;
}

static void tilcdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
}

static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
        .destroy        = tilcdc_crtc_destroy,
        .set_config     = drm_atomic_helper_set_config,
        .page_flip      = drm_atomic_helper_page_flip,
        .reset          = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
        .enable_vblank  = tilcdc_crtc_enable_vblank,
        .disable_vblank = tilcdc_crtc_disable_vblank,
};

static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
        .mode_fixup     = tilcdc_crtc_mode_fixup,
        .enable         = tilcdc_crtc_enable,
        .disable        = tilcdc_crtc_disable,
        .atomic_check   = tilcdc_crtc_atomic_check,
};

int tilcdc_crtc_max_width(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct tilcdc_drm_private *priv = dev->dev_private;
        int max_width = 0;

        if (priv->rev == 1)
                max_width = 1024;
        else if (priv->rev == 2)
                max_width = 2048;

        return max_width;
}

int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
        struct tilcdc_drm_private *priv = crtc->dev->dev_private;
        unsigned int bandwidth;
        uint32_t hbp, hfp, hsw, vbp, vfp, vsw;

        /*
         * check to see if the width is within the range that
         * the LCD Controller physically supports
         */
        if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
                return MODE_VIRTUAL_X;

        /* width must be multiple of 16 */
        if (mode->hdisplay & 0xf)
                return MODE_VIRTUAL_X;

        if (mode->vdisplay > 2048)
                return MODE_VIRTUAL_Y;

        DBG("Processing mode %dx%d@%d with pixel clock %d",
            mode->hdisplay, mode->vdisplay,
            drm_mode_vrefresh(mode), mode->clock);

        hbp = mode->htotal - mode->hsync_end;
        hfp = mode->hsync_start - mode->hdisplay;
        hsw = mode->hsync_end - mode->hsync_start;
        vbp = mode->vtotal - mode->vsync_end;
        vfp = mode->vsync_start - mode->vdisplay;
        vsw = mode->vsync_end - mode->vsync_start;

        if ((hbp-1) & ~0x3ff) {
                DBG("Pruning mode: Horizontal Back Porch out of range");
                return MODE_HBLANK_WIDE;
        }

        if ((hfp-1) & ~0x3ff) {
                DBG("Pruning mode: Horizontal Front Porch out of range");
                return MODE_HBLANK_WIDE;
        }

        if ((hsw-1) & ~0x3ff) {
                DBG("Pruning mode: Horizontal Sync Width out of range");
                return MODE_HSYNC_WIDE;
        }

        if (vbp & ~0xff) {
                DBG("Pruning mode: Vertical Back Porch out of range");
                return MODE_VBLANK_WIDE;
        }

        if (vfp & ~0xff) {
                DBG("Pruning mode: Vertical Front Porch out of range");
                return MODE_VBLANK_WIDE;
        }

        if ((vsw-1) & ~0x3f) {
                DBG("Pruning mode: Vertical Sync Width out of range");
                return MODE_VSYNC_WIDE;
        }

        /*
         * some devices have a maximum allowed pixel clock
         * configured from the DT
         */
        if (mode->clock > priv->max_pixelclock) {
                DBG("Pruning mode: pixel clock too high");
                return MODE_CLOCK_HIGH;
        }

        /*
         * some devices further limit the max horizontal resolution
         * configured from the DT
         */
        if (mode->hdisplay > priv->max_width)
                return MODE_BAD_WIDTH;

        /* filter out modes that would require too much memory bandwidth: */
        bandwidth = mode->hdisplay * mode->vdisplay *
                drm_mode_vrefresh(mode);
        if (bandwidth > priv->max_bandwidth) {
                DBG("Pruning mode: exceeds defined bandwidth limit");
                return MODE_BAD;
        }

        return MODE_OK;
}

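/*
 * Worked example for the bandwidth check above (illustrative numbers):
 * a 1920x1080 mode at 60 Hz gives bandwidth = 1920 * 1080 * 60 =
 * 124,416,000 pixels per second, which is compared against
 * priv->max_bandwidth; anything above that limit is pruned as MODE_BAD
 * before tilcdc_crtc_atomic_check() lets the mode through.
 */
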
void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
                                const struct tilcdc_panel_info *info)
{
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

        tilcdc_crtc->info = info;
}

void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
                                        bool simulate_vesa_sync)
{
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

        tilcdc_crtc->simulate_vesa_sync = simulate_vesa_sync;
}

void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct tilcdc_drm_private *priv = dev->dev_private;
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

        drm_modeset_lock(&crtc->mutex, NULL);
        if (tilcdc_crtc->lcd_fck_rate != clk_get_rate(priv->clk)) {
                if (tilcdc_crtc_is_on(crtc)) {
                        pm_runtime_get_sync(dev->dev);
                        tilcdc_crtc_disable(crtc);

                        tilcdc_crtc_set_clk(crtc);

                        tilcdc_crtc_enable(crtc);
                        pm_runtime_put_sync(dev->dev);
                }
        }
        drm_modeset_unlock(&crtc->mutex);
}

#define SYNC_LOST_COUNT_LIMIT 50

irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
{
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct tilcdc_drm_private *priv = dev->dev_private;
        uint32_t stat, reg;

        stat = tilcdc_read_irqstatus(dev);
        tilcdc_clear_irqstatus(dev, stat);

        if (stat & LCDC_END_OF_FRAME0) {
                unsigned long flags;
                bool skip_event = false;
                ktime_t now;

                now = ktime_get();

                drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);

                spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

                tilcdc_crtc->last_vblank = now;

                if (tilcdc_crtc->next_fb) {
                        set_scanout(crtc, tilcdc_crtc->next_fb);
                        tilcdc_crtc->next_fb = NULL;
                        skip_event = true;
                }

                spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

                drm_crtc_handle_vblank(crtc);

                if (!skip_event) {
                        struct drm_pending_vblank_event *event;

                        spin_lock_irqsave(&dev->event_lock, flags);

                        event = tilcdc_crtc->event;
                        tilcdc_crtc->event = NULL;
                        if (event)
                                drm_crtc_send_vblank_event(crtc, event);

                        spin_unlock_irqrestore(&dev->event_lock, flags);
                }

                if (tilcdc_crtc->frame_intact)
                        tilcdc_crtc->sync_lost_count = 0;
                else
                        tilcdc_crtc->frame_intact = true;
        }

        if (stat & LCDC_FIFO_UNDERFLOW)
                dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow",
                                    __func__, stat);

        if (stat & LCDC_PL_LOAD_DONE) {
                complete(&tilcdc_crtc->palette_loaded);
                if (priv->rev == 1)
                        tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
                                     LCDC_V1_PL_INT_ENA);
                else
                        tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
                                     LCDC_V2_PL_INT_ENA);
        }

        if (stat & LCDC_SYNC_LOST) {
                dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
                                    __func__, stat);
                tilcdc_crtc->frame_intact = false;
                if (priv->rev == 1) {
                        reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);
                        if (reg & LCDC_RASTER_ENABLE) {
                                tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
                                             LCDC_RASTER_ENABLE);
                                tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
                                           LCDC_RASTER_ENABLE);
                        }
                } else {
                        if (tilcdc_crtc->sync_lost_count++ >
                            SYNC_LOST_COUNT_LIMIT) {
                                dev_err(dev->dev,
                                        "%s(0x%08x): Sync lost flood detected, recovering",
                                        __func__, stat);
                                queue_work(system_wq,
                                           &tilcdc_crtc->recover_work);
                                tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
                                             LCDC_SYNC_LOST);
                                tilcdc_crtc->sync_lost_count = 0;
                        }
                }
        }

        if (stat & LCDC_FRAME_DONE) {
                tilcdc_crtc->frame_done = true;
                wake_up(&tilcdc_crtc->frame_done_wq);
                /* rev 1 lcdc appears to hang if irq is not disabled here */
                if (priv->rev == 1)
                        tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
                                     LCDC_V1_FRAME_DONE_INT_ENA);
        }

        /* For revision 2 only */
        if (priv->rev == 2) {
                /* Indicate to LCDC that the interrupt service routine has
                 * completed, see 13.3.6.1.6 in AM335x TRM.
                 */
                tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
        }

        return IRQ_HANDLED;
}

int tilcdc_crtc_create(struct drm_device *dev)
{
        struct tilcdc_drm_private *priv = dev->dev_private;
        struct tilcdc_crtc *tilcdc_crtc;
        struct drm_crtc *crtc;
        int ret;

        tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL);
        if (!tilcdc_crtc) {
                dev_err(dev->dev, "allocation failed\n");
                return -ENOMEM;
        }

        init_completion(&tilcdc_crtc->palette_loaded);
        tilcdc_crtc->palette_base = dmam_alloc_coherent(dev->dev,
                                        TILCDC_PALETTE_SIZE,
                                        &tilcdc_crtc->palette_dma_handle,
                                        GFP_KERNEL | __GFP_ZERO);
        if (!tilcdc_crtc->palette_base)
                return -ENOMEM;
        *tilcdc_crtc->palette_base = TILCDC_PALETTE_FIRST_ENTRY;

        crtc = &tilcdc_crtc->base;

        ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
        if (ret < 0)
                goto fail;

        mutex_init(&tilcdc_crtc->enable_lock);

        init_waitqueue_head(&tilcdc_crtc->frame_done_wq);

        drm_flip_work_init(&tilcdc_crtc->unref_work,
                           "unref", unref_worker);

        spin_lock_init(&tilcdc_crtc->irq_lock);
        INIT_WORK(&tilcdc_crtc->recover_work, tilcdc_crtc_recover_work);

        ret = drm_crtc_init_with_planes(dev, crtc,
                                        &tilcdc_crtc->primary,
                                        NULL,
                                        &tilcdc_crtc_funcs,
                                        "tilcdc crtc");
        if (ret < 0)
                goto fail;

        drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);

        if (priv->is_componentized) {
                crtc->port = of_graph_get_port_by_id(dev->dev->of_node, 0);
                if (!crtc->port) { /* This should never happen */
                        dev_err(dev->dev, "Port node not found in %s\n",
                                dev->dev->of_node->full_name);
                        ret = -EINVAL;
                        goto fail;
                }
        }

        priv->crtc = crtc;
        return 0;

fail:
        tilcdc_crtc_destroy(crtc);
        return ret;
}