tilcdc_crtc.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050
  1. /*
  2. * Copyright (C) 2012 Texas Instruments
  3. * Author: Rob Clark <robdclark@gmail.com>
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License version 2 as published by
  7. * the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program. If not, see <http://www.gnu.org/licenses/>.
  16. */
  17. #include <drm/drm_atomic.h>
  18. #include <drm/drm_atomic_helper.h>
  19. #include <drm/drm_crtc.h>
  20. #include <drm/drm_flip_work.h>
  21. #include <drm/drm_plane_helper.h>
  22. #include <linux/workqueue.h>
  23. #include <linux/completion.h>
  24. #include <linux/dma-mapping.h>
  25. #include <linux/of_graph.h>
  26. #include <linux/math64.h>
  27. #include "tilcdc_drv.h"
  28. #include "tilcdc_regs.h"
  29. #define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000
  30. #define TILCDC_PALETTE_SIZE 32
  31. #define TILCDC_PALETTE_FIRST_ENTRY 0x4000
/*
 * Per-CRTC driver state for the TI LCD controller (LCDC).
 */
struct tilcdc_crtc {
	struct drm_crtc base;			/* must be first: to_tilcdc_crtc() */

	struct drm_plane primary;		/* the single primary plane */
	const struct tilcdc_panel_info *info;	/* panel timings/DMA config */
	struct drm_pending_vblank_event *event;	/* pending page-flip event */
	struct mutex enable_lock;		/* protects enabled/shutdown */
	bool enabled;				/* raster currently running */
	bool shutdown;				/* set once; blocks re-enable */
	wait_queue_head_t frame_done_wq;	/* woken on LCDC_FRAME_DONE */
	bool frame_done;			/* condition for frame_done_wq */
	spinlock_t irq_lock;			/* protects last_vblank/next_fb */

	unsigned int lcd_fck_rate;		/* cached functional clock rate (Hz) */

	ktime_t last_vblank;			/* time stamp of last end-of-frame */
	unsigned int hvtotal_us;		/* frame period in microseconds */

	struct drm_framebuffer *next_fb;	/* fb deferred to next vblank */

	/* Only set if an external encoder is connected */
	bool simulate_vesa_sync;

	int sync_lost_count;			/* consecutive SYNC_LOST irqs (rev 2) */
	bool frame_intact;			/* no sync loss since last frame */
	struct work_struct recover_work;	/* full disable/enable recovery */

	dma_addr_t palette_dma_handle;		/* DMA address of palette_base */
	u16 *palette_base;			/* 32-byte dummy palette buffer */
	struct completion palette_loaded;	/* completed by PL_LOAD_DONE irq */
};

/* Convert a drm_crtc embedded in tilcdc_crtc back to its container. */
#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
/*
 * Program the DMA base and ceiling registers for the framebuffer @fb,
 * honouring the CRTC's current x/y panning offset. Called both at mode
 * set time and from the vblank irq to latch a flipped framebuffer.
 */
static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct drm_gem_cma_object *gem;
	dma_addr_t start, end;
	u64 dma_base_and_ceiling;

	gem = drm_fb_cma_get_gem_obj(fb, 0);

	/* DMA start address: buffer base plus panning offset. */
	start = gem->paddr + fb->offsets[0] +
		crtc->y * fb->pitches[0] +
		crtc->x * fb->format->cpp[0];

	end = start + (crtc->mode.vdisplay * fb->pitches[0]);

	/* Write LCDC_DMA_FB_BASE_ADDR_0_REG and LCDC_DMA_FB_CEILING_ADDR_0_REG
	 * with a single instruction, if available. This should make it more
	 * unlikely that LCDC would fetch the DMA addresses in the middle of
	 * an update.
	 */
	if (priv->rev == 1)
		end -= 1;	/* rev 1 ceiling is inclusive */

	dma_base_and_ceiling = (u64)end << 32 | start;
	tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);
}
/*
 * The driver currently only supports only true color formats. For
 * true color the palette block is bypassed, but a 32 byte palette
 * should still be loaded. The first 16-bit entry must be 0x4000 while
 * all other entries must be zeroed.
 */
static void tilcdc_crtc_load_palette(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	int ret;

	reinit_completion(&tilcdc_crtc->palette_loaded);

	/* Tell the LCDC where the palette is located. */
	tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG,
		     tilcdc_crtc->palette_dma_handle);
	tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG,
		     (u32) tilcdc_crtc->palette_dma_handle +
		     TILCDC_PALETTE_SIZE - 1);

	/* Set dma load mode for palette loading only. */
	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
			  LCDC_PALETTE_LOAD_MODE(PALETTE_ONLY),
			  LCDC_PALETTE_LOAD_MODE_MASK);

	/* Enable DMA Palette Loaded Interrupt */
	if (priv->rev == 1)
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
	else
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_PL_INT_ENA);

	/* Enable LCDC DMA and wait for palette to be loaded. */
	tilcdc_clear_irqstatus(dev, 0xffffffff);
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

	/* The irq handler completes palette_loaded on LCDC_PL_LOAD_DONE. */
	ret = wait_for_completion_timeout(&tilcdc_crtc->palette_loaded,
					  msecs_to_jiffies(50));
	if (ret == 0)
		dev_err(dev->dev, "%s: Palette loading timeout", __func__);

	/* Disable LCDC DMA and DMA Palette Loaded Interrupt. */
	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	if (priv->rev == 1)
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
	else
		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, LCDC_V2_PL_INT_ENA);
}
/*
 * Clear all pending irq status bits and enable the interrupts used at
 * runtime (sync lost, frame done, underflow, end of frame). Rev 1 and
 * rev 2 controllers use different enable registers.
 */
static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;

	tilcdc_clear_irqstatus(dev, 0xffffffff);

	if (priv->rev == 1) {
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
			LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
			LCDC_V1_UNDERFLOW_INT_ENA);
		tilcdc_set(dev, LCDC_DMA_CTRL_REG,
			LCDC_V1_END_OF_FRAME_INT_ENA);
	} else {
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
			LCDC_V2_UNDERFLOW_INT_ENA |
			LCDC_V2_END_OF_FRAME0_INT_ENA |
			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}
/*
 * Mirror of tilcdc_crtc_enable_irqs(): mask every interrupt the driver
 * may have enabled, including the palette-load interrupt.
 */
static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;

	/* disable irqs that we might have enabled: */
	if (priv->rev == 1) {
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
			LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
			LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
		tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
			LCDC_V1_END_OF_FRAME_INT_ENA);
	} else {
		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
			LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
			LCDC_V2_END_OF_FRAME0_INT_ENA |
			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}
/*
 * Pulse the main module reset. Only the rev 2 controller has the
 * LCDC_CLK_RESET_REG; rev 1 is a no-op.
 */
static void reset(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;

	if (priv->rev != 2)
		return;

	tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
	usleep_range(250, 1000);
	tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
}
  165. /*
  166. * Calculate the percentage difference between the requested pixel clock rate
  167. * and the effective rate resulting from calculating the clock divider value.
  168. */
  169. static unsigned int tilcdc_pclk_diff(unsigned long rate,
  170. unsigned long real_rate)
  171. {
  172. int r = rate / 100, rr = real_rate / 100;
  173. return (unsigned int)(abs(((rr - r) * 100) / r));
  174. }
/*
 * Configure the pixel clock for the current mode: ask the clock
 * framework for (mode clock * divider), falling back to recomputing the
 * divider from the actual rate when clk_set_rate() is not supported.
 * Also programs the divisor into LCDC_CTRL_REG and, on rev 2, ungates
 * the core clocks.
 */
static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	unsigned long clk_rate, real_rate, req_rate;
	unsigned int clkdiv;
	int ret;

	clkdiv = 2; /* first try using a standard divider of 2 */

	/* mode.clock is in KHz, set_rate wants parameter in Hz */
	req_rate = crtc->mode.clock * 1000;

	ret = clk_set_rate(priv->clk, req_rate * clkdiv);
	clk_rate = clk_get_rate(priv->clk);
	if (ret < 0) {
		/*
		 * If we fail to set the clock rate (some architectures don't
		 * use the common clock framework yet and may not implement
		 * all the clk API calls for every clock), try the next best
		 * thing: adjusting the clock divider, unless clk_get_rate()
		 * failed as well.
		 */
		if (!clk_rate) {
			/* Nothing more we can do. Just bail out. */
			dev_err(dev->dev,
				"failed to set the pixel clock - unable to read current lcdc clock rate\n");
			return;
		}

		clkdiv = DIV_ROUND_CLOSEST(clk_rate, req_rate);

		/*
		 * Emit a warning if the real clock rate resulting from the
		 * calculated divider differs much from the requested rate.
		 *
		 * 5% is an arbitrary value - LCDs are usually quite tolerant
		 * about pixel clock rates.
		 */
		real_rate = clkdiv * req_rate;

		if (tilcdc_pclk_diff(clk_rate, real_rate) > 5) {
			dev_warn(dev->dev,
				 "effective pixel clock rate (%luHz) differs from the calculated rate (%luHz)\n",
				 clk_rate, real_rate);
		}
	}

	/* Cache the rate so tilcdc_crtc_update_clk() can detect changes. */
	tilcdc_crtc->lcd_fck_rate = clk_rate;

	DBG("lcd_clk=%u, mode clock=%d, div=%u",
	    tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv);

	/* Configure the LCD clock divisor. */
	tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
		     LCDC_RASTER_MODE);

	if (priv->rev == 2)
		tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
			   LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
			   LCDC_V2_CORE_CLK_EN);
}
  228. static uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode)
  229. {
  230. return (uint) div_u64(1000llu * mode->htotal * mode->vtotal,
  231. mode->clock);
  232. }
  233. static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
  234. {
  235. struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
  236. struct drm_device *dev = crtc->dev;
  237. struct tilcdc_drm_private *priv = dev->dev_private;
  238. const struct tilcdc_panel_info *info = tilcdc_crtc->info;
  239. uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
  240. struct drm_display_mode *mode = &crtc->state->adjusted_mode;
  241. struct drm_framebuffer *fb = crtc->primary->state->fb;
  242. if (WARN_ON(!info))
  243. return;
  244. if (WARN_ON(!fb))
  245. return;
  246. /* Configure the Burst Size and fifo threshold of DMA: */
  247. reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
  248. switch (info->dma_burst_sz) {
  249. case 1:
  250. reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1);
  251. break;
  252. case 2:
  253. reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2);
  254. break;
  255. case 4:
  256. reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4);
  257. break;
  258. case 8:
  259. reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8);
  260. break;
  261. case 16:
  262. reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
  263. break;
  264. default:
  265. dev_err(dev->dev, "invalid burst size\n");
  266. return;
  267. }
  268. reg |= (info->fifo_th << 8);
  269. tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);
  270. /* Configure timings: */
  271. hbp = mode->htotal - mode->hsync_end;
  272. hfp = mode->hsync_start - mode->hdisplay;
  273. hsw = mode->hsync_end - mode->hsync_start;
  274. vbp = mode->vtotal - mode->vsync_end;
  275. vfp = mode->vsync_start - mode->vdisplay;
  276. vsw = mode->vsync_end - mode->vsync_start;
  277. DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
  278. mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
  279. /* Set AC Bias Period and Number of Transitions per Interrupt: */
  280. reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
  281. reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
  282. LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);
  283. /*
  284. * subtract one from hfp, hbp, hsw because the hardware uses
  285. * a value of 0 as 1
  286. */
  287. if (priv->rev == 2) {
  288. /* clear bits we're going to set */
  289. reg &= ~0x78000033;
  290. reg |= ((hfp-1) & 0x300) >> 8;
  291. reg |= ((hbp-1) & 0x300) >> 4;
  292. reg |= ((hsw-1) & 0x3c0) << 21;
  293. }
  294. tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);
  295. reg = (((mode->hdisplay >> 4) - 1) << 4) |
  296. (((hbp-1) & 0xff) << 24) |
  297. (((hfp-1) & 0xff) << 16) |
  298. (((hsw-1) & 0x3f) << 10);
  299. if (priv->rev == 2)
  300. reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
  301. tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);
  302. reg = ((mode->vdisplay - 1) & 0x3ff) |
  303. ((vbp & 0xff) << 24) |
  304. ((vfp & 0xff) << 16) |
  305. (((vsw-1) & 0x3f) << 10);
  306. tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);
  307. /*
  308. * be sure to set Bit 10 for the V2 LCDC controller,
  309. * otherwise limited to 1024 pixels width, stopping
  310. * 1920x1080 being supported.
  311. */
  312. if (priv->rev == 2) {
  313. if ((mode->vdisplay - 1) & 0x400) {
  314. tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG,
  315. LCDC_LPP_B10);
  316. } else {
  317. tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG,
  318. LCDC_LPP_B10);
  319. }
  320. }
  321. /* Configure display type: */
  322. reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
  323. ~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
  324. LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK |
  325. 0x000ff000 /* Palette Loading Delay bits */);
  326. reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
  327. if (info->tft_alt_mode)
  328. reg |= LCDC_TFT_ALT_ENABLE;
  329. if (priv->rev == 2) {
  330. switch (fb->format->format) {
  331. case DRM_FORMAT_BGR565:
  332. case DRM_FORMAT_RGB565:
  333. break;
  334. case DRM_FORMAT_XBGR8888:
  335. case DRM_FORMAT_XRGB8888:
  336. reg |= LCDC_V2_TFT_24BPP_UNPACK;
  337. /* fallthrough */
  338. case DRM_FORMAT_BGR888:
  339. case DRM_FORMAT_RGB888:
  340. reg |= LCDC_V2_TFT_24BPP_MODE;
  341. break;
  342. default:
  343. dev_err(dev->dev, "invalid pixel format\n");
  344. return;
  345. }
  346. }
  347. reg |= info->fdd < 12;
  348. tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);
  349. if (info->invert_pxl_clk)
  350. tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
  351. else
  352. tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
  353. if (info->sync_ctrl)
  354. tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
  355. else
  356. tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
  357. if (info->sync_edge)
  358. tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
  359. else
  360. tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
  361. if (mode->flags & DRM_MODE_FLAG_NHSYNC)
  362. tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
  363. else
  364. tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
  365. if (mode->flags & DRM_MODE_FLAG_NVSYNC)
  366. tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
  367. else
  368. tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
  369. if (info->raster_order)
  370. tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
  371. else
  372. tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
  373. tilcdc_crtc_set_clk(crtc);
  374. tilcdc_crtc_load_palette(crtc);
  375. set_scanout(crtc, fb);
  376. crtc->hwmode = crtc->state->adjusted_mode;
  377. tilcdc_crtc->hvtotal_us =
  378. tilcdc_mode_hvtotal(&crtc->hwmode);
  379. }
/*
 * Power up and start the CRTC: runtime-resume the device, reset and
 * reprogram the controller, enable irqs and the raster DMA, then turn
 * vblank handling on. No-op if already enabled or shut down.
 */
static void tilcdc_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	unsigned long flags;

	mutex_lock(&tilcdc_crtc->enable_lock);
	if (tilcdc_crtc->enabled || tilcdc_crtc->shutdown) {
		mutex_unlock(&tilcdc_crtc->enable_lock);
		return;
	}

	pm_runtime_get_sync(dev->dev);

	reset(crtc);

	tilcdc_crtc_set_mode(crtc);

	tilcdc_crtc_enable_irqs(dev);

	tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
			  LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
			  LCDC_PALETTE_LOAD_MODE_MASK);

	/* There is no real chance for a race here as the time stamp
	 * is taken before the raster DMA is started. The spin-lock is
	 * taken to have a memory barrier after taking the time-stamp
	 * and to avoid a context switch between taking the stamp and
	 * enabling the raster.
	 */
	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
	tilcdc_crtc->last_vblank = ktime_get();
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

	drm_crtc_vblank_on(crtc);

	tilcdc_crtc->enabled = true;

	mutex_unlock(&tilcdc_crtc->enable_lock);
}
/* Atomic-helper .atomic_enable hook; the old state is not needed. */
static void tilcdc_crtc_atomic_enable(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_state)
{
	tilcdc_crtc_enable(crtc);
}
/*
 * Stop the CRTC. @shutdown additionally latches the shutdown flag so
 * tilcdc_crtc_enable() refuses to start it again (driver unload path).
 * Waits for the controller to finish the current frame before powering
 * down, as required by the hardware.
 */
static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	int ret;

	mutex_lock(&tilcdc_crtc->enable_lock);
	if (shutdown)
		tilcdc_crtc->shutdown = true;
	if (!tilcdc_crtc->enabled) {
		mutex_unlock(&tilcdc_crtc->enable_lock);
		return;
	}
	tilcdc_crtc->frame_done = false;
	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

	/*
	 * Wait for framedone irq which will still come before putting
	 * things to sleep..
	 */
	ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
				 tilcdc_crtc->frame_done,
				 msecs_to_jiffies(500));
	if (ret == 0)
		dev_err(dev->dev, "%s: timeout waiting for framedone\n",
			__func__);

	drm_crtc_vblank_off(crtc);

	tilcdc_crtc_disable_irqs(dev);

	pm_runtime_put_sync(dev->dev);

	tilcdc_crtc->enabled = false;
	mutex_unlock(&tilcdc_crtc->enable_lock);
}
/* Disable without latching shutdown (CRTC may be re-enabled later). */
static void tilcdc_crtc_disable(struct drm_crtc *crtc)
{
	tilcdc_crtc_off(crtc, false);
}
/* Atomic-helper .atomic_disable hook; the old state is not needed. */
static void tilcdc_crtc_atomic_disable(struct drm_crtc *crtc,
				       struct drm_crtc_state *old_state)
{
	tilcdc_crtc_disable(crtc);
}
/* Permanently stop the CRTC; used on driver teardown. */
void tilcdc_crtc_shutdown(struct drm_crtc *crtc)
{
	tilcdc_crtc_off(crtc, true);
}
  460. static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
  461. {
  462. return crtc->state && crtc->state->enable && crtc->state->active;
  463. }
/*
 * Workqueue handler queued by the irq path on a sync-lost flood:
 * recover the controller with a full disable/enable cycle, under the
 * CRTC modeset lock, but only if the CRTC is supposed to be running.
 */
static void tilcdc_crtc_recover_work(struct work_struct *work)
{
	struct tilcdc_crtc *tilcdc_crtc =
		container_of(work, struct tilcdc_crtc, recover_work);
	struct drm_crtc *crtc = &tilcdc_crtc->base;

	dev_info(crtc->dev->dev, "%s: Reset CRTC", __func__);

	drm_modeset_lock(&crtc->mutex, NULL);

	if (!tilcdc_crtc_is_on(crtc))
		goto out;

	tilcdc_crtc_disable(crtc);
	tilcdc_crtc_enable(crtc);
out:
	drm_modeset_unlock(&crtc->mutex);
}
/*
 * CRTC .destroy hook: shut the hardware down, drain pending work,
 * release the OF port reference and unregister the CRTC. The tilcdc_crtc
 * struct itself is devm-allocated and freed with the device.
 */
static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
{
	struct tilcdc_drm_private *priv = crtc->dev->dev_private;

	tilcdc_crtc_shutdown(crtc);

	flush_workqueue(priv->wq);

	of_node_put(crtc->port);
	drm_crtc_cleanup(crtc);
}
/*
 * Queue a page flip to @fb, emitting @event on completion.
 *
 * If the next vblank is imminent (within the safety threshold) the new
 * scanout address is deferred to the end-of-frame irq via next_fb so the
 * DMA registers are not changed mid-fetch; otherwise it is programmed
 * immediately.
 *
 * Returns 0, or -EBUSY if a flip is already pending.
 *
 * NOTE(review): tilcdc_crtc->event is read and assigned here outside of
 * dev->event_lock, while the irq handler clears it under that lock —
 * confirm callers serialize flips (e.g. via the atomic commit path).
 */
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
		struct drm_framebuffer *fb,
		struct drm_pending_vblank_event *event)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;

	if (tilcdc_crtc->event) {
		dev_err(dev->dev, "already pending page flip!\n");
		return -EBUSY;
	}

	tilcdc_crtc->event = event;

	mutex_lock(&tilcdc_crtc->enable_lock);

	if (tilcdc_crtc->enabled) {
		unsigned long flags;
		ktime_t next_vblank;
		s64 tdiff;

		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

		/* Estimate the next vblank from the last one plus one frame. */
		next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
					   tilcdc_crtc->hvtotal_us);

		tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));

		if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
			tilcdc_crtc->next_fb = fb;
		else
			set_scanout(crtc, fb);

		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
	}

	mutex_unlock(&tilcdc_crtc->enable_lock);

	return 0;
}
  515. static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
  516. const struct drm_display_mode *mode,
  517. struct drm_display_mode *adjusted_mode)
  518. {
  519. struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
  520. if (!tilcdc_crtc->simulate_vesa_sync)
  521. return true;
  522. /*
  523. * tilcdc does not generate VESA-compliant sync but aligns
  524. * VS on the second edge of HS instead of first edge.
  525. * We use adjusted_mode, to fixup sync by aligning both rising
  526. * edges and add HSKEW offset to fix the sync.
  527. */
  528. adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
  529. adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
  530. if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
  531. adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
  532. adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
  533. } else {
  534. adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
  535. adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
  536. }
  537. return true;
  538. }
/*
 * CRTC .atomic_check hook: an active CRTC must have its primary plane
 * present (as planes[0] of the atomic state) and a mode the hardware
 * can produce. Returns 0 or -EINVAL.
 */
static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
				    struct drm_crtc_state *state)
{
	struct drm_display_mode *mode = &state->mode;
	int ret;

	/* If we are not active we don't care */
	if (!state->active)
		return 0;

	if (state->state->planes[0].ptr != crtc->primary ||
	    state->state->planes[0].state == NULL ||
	    state->state->planes[0].state->crtc != crtc) {
		dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
		return -EINVAL;
	}

	ret = tilcdc_crtc_mode_valid(crtc, mode);
	if (ret) {
		dev_dbg(crtc->dev->dev, "Mode \"%s\" not valid", mode->name);
		return -EINVAL;
	}

	return 0;
}
/* Vblank irqs are always on while the CRTC runs; nothing to enable. */
static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc)
{
	return 0;
}
/* Counterpart of tilcdc_crtc_enable_vblank(); nothing to disable. */
static void tilcdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
}
/*
 * CRTC .reset hook: reset the software state, then make sure the raster
 * is actually stopped (e.g. left running by the bootloader splash),
 * waiting for the frame-done irq before powering back down.
 *
 * NOTE(review): LCDC_INT_ENABLE_SET_REG is written unconditionally here,
 * but elsewhere in this file that register is only used for rev 2 —
 * confirm this path is safe on rev 1 hardware.
 */
static void tilcdc_crtc_reset(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	int ret;

	drm_atomic_helper_crtc_reset(crtc);

	/* Turn the raster off if it for some reason is on. */
	pm_runtime_get_sync(dev->dev);
	if (tilcdc_read(dev, LCDC_RASTER_CTRL_REG) & LCDC_RASTER_ENABLE) {
		/* Enable DMA Frame Done Interrupt */
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_FRAME_DONE);
		tilcdc_clear_irqstatus(dev, 0xffffffff);

		tilcdc_crtc->frame_done = false;
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

		ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
					 tilcdc_crtc->frame_done,
					 msecs_to_jiffies(500));
		if (ret == 0)
			dev_err(dev->dev, "%s: timeout waiting for framedone\n",
				__func__);
	}
	pm_runtime_put_sync(dev->dev);
}
/* CRTC core hooks; state handling uses the stock atomic helpers. */
static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
	.destroy        = tilcdc_crtc_destroy,
	.set_config     = drm_atomic_helper_set_config,
	.page_flip      = drm_atomic_helper_page_flip,
	.reset		= tilcdc_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank	= tilcdc_crtc_enable_vblank,
	.disable_vblank	= tilcdc_crtc_disable_vblank,
};
/* CRTC helper hooks driving mode fixup, validation and enable/disable. */
static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
	.mode_fixup     = tilcdc_crtc_mode_fixup,
	.atomic_check	= tilcdc_crtc_atomic_check,
	.atomic_enable	= tilcdc_crtc_atomic_enable,
	.atomic_disable	= tilcdc_crtc_atomic_disable,
};
  606. int tilcdc_crtc_max_width(struct drm_crtc *crtc)
  607. {
  608. struct drm_device *dev = crtc->dev;
  609. struct tilcdc_drm_private *priv = dev->dev_private;
  610. int max_width = 0;
  611. if (priv->rev == 1)
  612. max_width = 1024;
  613. else if (priv->rev == 2)
  614. max_width = 2048;
  615. return max_width;
  616. }
/*
 * Validate @mode against hardware limits: resolution range, the
 * register-field widths of each timing parameter, and the DT-configured
 * pixel clock, width and bandwidth caps. Returns MODE_OK or the
 * enum drm_mode_status reason for rejection.
 */
int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct tilcdc_drm_private *priv = crtc->dev->dev_private;
	unsigned int bandwidth;
	uint32_t hbp, hfp, hsw, vbp, vfp, vsw;

	/*
	 * check to see if the width is within the range that
	 * the LCD Controller physically supports
	 */
	if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
		return MODE_VIRTUAL_X;

	/* width must be multiple of 16 */
	if (mode->hdisplay & 0xf)
		return MODE_VIRTUAL_X;

	if (mode->vdisplay > 2048)
		return MODE_VIRTUAL_Y;

	DBG("Processing mode %dx%d@%d with pixel clock %d",
		mode->hdisplay, mode->vdisplay,
		drm_mode_vrefresh(mode), mode->clock);

	hbp = mode->htotal - mode->hsync_end;
	hfp = mode->hsync_start - mode->hdisplay;
	hsw = mode->hsync_end - mode->hsync_start;
	vbp = mode->vtotal - mode->vsync_end;
	vfp = mode->vsync_start - mode->vdisplay;
	vsw = mode->vsync_end - mode->vsync_start;

	/* Each timing value (minus one where the hw encodes 0 as 1)
	 * must fit its register field.
	 */
	if ((hbp-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Back Porch out of range");
		return MODE_HBLANK_WIDE;
	}

	if ((hfp-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Front Porch out of range");
		return MODE_HBLANK_WIDE;
	}

	if ((hsw-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Sync Width out of range");
		return MODE_HSYNC_WIDE;
	}

	if (vbp & ~0xff) {
		DBG("Pruning mode: Vertical Back Porch out of range");
		return MODE_VBLANK_WIDE;
	}

	if (vfp & ~0xff) {
		DBG("Pruning mode: Vertical Front Porch out of range");
		return MODE_VBLANK_WIDE;
	}

	if ((vsw-1) & ~0x3f) {
		DBG("Pruning mode: Vertical Sync Width out of range");
		return MODE_VSYNC_WIDE;
	}

	/*
	 * some devices have a maximum allowed pixel clock
	 * configured from the DT
	 */
	if (mode->clock > priv->max_pixelclock) {
		DBG("Pruning mode: pixel clock too high");
		return MODE_CLOCK_HIGH;
	}

	/*
	 * some devices further limit the max horizontal resolution
	 * configured from the DT
	 */
	if (mode->hdisplay > priv->max_width)
		return MODE_BAD_WIDTH;

	/* filter out modes that would require too much memory bandwidth: */
	bandwidth = mode->hdisplay * mode->vdisplay *
		drm_mode_vrefresh(mode);
	if (bandwidth > priv->max_bandwidth) {
		DBG("Pruning mode: exceeds defined bandwidth limit");
		return MODE_BAD;
	}

	return MODE_OK;
}
/* Attach the panel timing/DMA configuration used at mode-set time. */
void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
		const struct tilcdc_panel_info *info)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	tilcdc_crtc->info = info;
}
/* Enable VESA-sync simulation in mode_fixup (external encoders only). */
void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
					bool simulate_vesa_sync)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	tilcdc_crtc->simulate_vesa_sync = simulate_vesa_sync;
}
/*
 * React to an external change of the functional clock rate: if the
 * cached rate no longer matches and the CRTC is running, restart it so
 * the clock divider is recomputed for the new rate.
 */
void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	drm_modeset_lock(&crtc->mutex, NULL);
	if (tilcdc_crtc->lcd_fck_rate != clk_get_rate(priv->clk)) {
		if (tilcdc_crtc_is_on(crtc)) {
			pm_runtime_get_sync(dev->dev);
			tilcdc_crtc_disable(crtc);

			tilcdc_crtc_set_clk(crtc);

			tilcdc_crtc_enable(crtc);
			pm_runtime_put_sync(dev->dev);
		}
	}
	drm_modeset_unlock(&crtc->mutex);
}
/* Consecutive sync-lost irqs tolerated before triggering recovery. */
#define SYNC_LOST_COUNT_LIMIT 50

/*
 * LCDC interrupt handler. Handles end-of-frame (vblank, deferred flips,
 * event delivery), FIFO underflow, palette-load completion, sync-lost
 * (with rev-specific recovery) and frame-done wakeups.
 */
irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	uint32_t stat, reg;

	stat = tilcdc_read_irqstatus(dev);
	tilcdc_clear_irqstatus(dev, stat);

	if (stat & LCDC_END_OF_FRAME0) {
		unsigned long flags;
		bool skip_event = false;
		ktime_t now;

		now = ktime_get();

		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

		tilcdc_crtc->last_vblank = now;

		/* Latch a deferred page flip now that the frame ended. */
		if (tilcdc_crtc->next_fb) {
			set_scanout(crtc, tilcdc_crtc->next_fb);
			tilcdc_crtc->next_fb = NULL;
			skip_event = true;	/* event fires on the next vblank */
		}

		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

		drm_crtc_handle_vblank(crtc);

		if (!skip_event) {
			struct drm_pending_vblank_event *event;

			spin_lock_irqsave(&dev->event_lock, flags);

			event = tilcdc_crtc->event;
			tilcdc_crtc->event = NULL;
			if (event)
				drm_crtc_send_vblank_event(crtc, event);

			spin_unlock_irqrestore(&dev->event_lock, flags);
		}

		if (tilcdc_crtc->frame_intact)
			tilcdc_crtc->sync_lost_count = 0;
		else
			tilcdc_crtc->frame_intact = true;
	}

	if (stat & LCDC_FIFO_UNDERFLOW)
		dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow",
				    __func__, stat);

	if (stat & LCDC_PL_LOAD_DONE) {
		/* Wake tilcdc_crtc_load_palette() and mask the irq. */
		complete(&tilcdc_crtc->palette_loaded);
		if (priv->rev == 1)
			tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
				     LCDC_V1_PL_INT_ENA);
		else
			tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
				     LCDC_V2_PL_INT_ENA);
	}

	if (stat & LCDC_SYNC_LOST) {
		dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
				    __func__, stat);
		tilcdc_crtc->frame_intact = false;
		if (priv->rev == 1) {
			/* Rev 1: restart the raster to resynchronize. */
			reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);
			if (reg & LCDC_RASTER_ENABLE) {
				tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
					     LCDC_RASTER_ENABLE);
				tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
					   LCDC_RASTER_ENABLE);
			}
		} else {
			/* Rev 2: after a flood of sync losses, mask the irq
			 * and schedule a full disable/enable recovery.
			 */
			if (tilcdc_crtc->sync_lost_count++ >
			    SYNC_LOST_COUNT_LIMIT) {
				dev_err(dev->dev,
					"%s(0x%08x): Sync lost flood detected, recovering",
					__func__, stat);
				queue_work(system_wq,
					   &tilcdc_crtc->recover_work);
				tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
					     LCDC_SYNC_LOST);
				tilcdc_crtc->sync_lost_count = 0;
			}
		}
	}

	if (stat & LCDC_FRAME_DONE) {
		tilcdc_crtc->frame_done = true;
		wake_up(&tilcdc_crtc->frame_done_wq);
		/* rev 1 lcdc appears to hang if irq is not disabled here */
		if (priv->rev == 1)
			tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
				     LCDC_V1_FRAME_DONE_INT_ENA);
	}

	/* For revision 2 only */
	if (priv->rev == 2) {
		/* Indicate to LCDC that the interrupt service routine has
		 * completed, see 13.3.6.1.6 in AM335x TRM.
		 */
		tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
	}

	return IRQ_HANDLED;
}
/*
 * Allocate and register the single tilcdc CRTC: set up the dummy
 * palette buffer, primary plane, locks and work items, register the
 * CRTC with its helper funcs and resolve the OF graph port for
 * componentized setups. Returns 0 or a negative errno.
 *
 * NOTE(review): if tilcdc_plane_init() fails, the fail path calls
 * tilcdc_crtc_destroy() on a CRTC that drm_crtc_init_with_planes() has
 * not initialized yet — verify drm_crtc_cleanup() tolerates that.
 */
int tilcdc_crtc_create(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc;
	struct drm_crtc *crtc;
	int ret;

	tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL);
	if (!tilcdc_crtc) {
		dev_err(dev->dev, "allocation failed\n");
		return -ENOMEM;
	}

	init_completion(&tilcdc_crtc->palette_loaded);
	/* Coherent buffer for the mandatory 32-byte true-color palette. */
	tilcdc_crtc->palette_base = dmam_alloc_coherent(dev->dev,
					TILCDC_PALETTE_SIZE,
					&tilcdc_crtc->palette_dma_handle,
					GFP_KERNEL | __GFP_ZERO);
	if (!tilcdc_crtc->palette_base)
		return -ENOMEM;
	/* First entry must be 0x4000; the rest stay zero (see above). */
	*tilcdc_crtc->palette_base = TILCDC_PALETTE_FIRST_ENTRY;

	crtc = &tilcdc_crtc->base;

	ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
	if (ret < 0)
		goto fail;

	mutex_init(&tilcdc_crtc->enable_lock);

	init_waitqueue_head(&tilcdc_crtc->frame_done_wq);

	spin_lock_init(&tilcdc_crtc->irq_lock);

	INIT_WORK(&tilcdc_crtc->recover_work, tilcdc_crtc_recover_work);

	ret = drm_crtc_init_with_planes(dev, crtc,
					&tilcdc_crtc->primary,
					NULL,
					&tilcdc_crtc_funcs,
					"tilcdc crtc");
	if (ret < 0)
		goto fail;

	drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);

	if (priv->is_componentized) {
		crtc->port = of_graph_get_port_by_id(dev->dev->of_node, 0);
		if (!crtc->port) { /* This should never happen */
			dev_err(dev->dev, "Port node not found in %pOF\n",
				dev->dev->of_node);
			ret = -EINVAL;
			goto fail;
		}
	}

	priv->crtc = crtc;
	return 0;

fail:
	tilcdc_crtc_destroy(crtc);
	return ret;
}