/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"

#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

struct mdp5_crtc {
        struct drm_crtc base;
        char name[8];
        struct drm_plane *plane;
        struct drm_plane *planes[8];
        int id;
        bool enabled;

        /* which mixer/encoder we route output to: */
        int mixer;

        /* if there is a pending flip, these will be non-null: */
        struct drm_pending_vblank_event *event;
        struct msm_fence_cb pageflip_cb;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
        atomic_t pending;

        /* the fb that we logically (from PoV of KMS API) hold a ref
         * to.  Which we may not yet be scanning out (we may still be
         * scanning out the previous fb if a page_flip is waiting for
         * gpu rendering to complete):
         */
        struct drm_framebuffer *fb;

        /* the fb that we currently hold a scanout ref to: */
        struct drm_framebuffer *scanout_fb;

        /* for unref'ing framebuffers after scanout completes: */
        struct drm_flip_work unref_fb_work;

        struct mdp_irq vblank;
        struct mdp_irq err;
};

#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
        struct msm_drm_private *priv = crtc->dev->dev_private;
        return to_mdp5_kms(to_mdp_kms(priv->kms));
}

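/* Mark which work is outstanding (PENDING_FLIP and/or PENDING_CURSOR)
 * and register for the vblank irq that will complete it (see
 * mdp5_crtc_vblank_irq()):
 */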
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        atomic_or(pending, &mdp5_crtc->pending);
        mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

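/* Build a flush bitmask covering every pipe currently attached to this
 * crtc, plus the mixer and CTL themselves, and write it to the CTL
 * FLUSH register so the staged double-buffered register updates take
 * effect:
 */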
static void crtc_flush(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        int id = mdp5_crtc->id;
        uint32_t i, flush = 0;

        for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
                struct drm_plane *plane = mdp5_crtc->planes[i];
                if (plane) {
                        enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
                        flush |= pipe2flush(pipe);
                }
        }
        flush |= mixer2flush(mdp5_crtc->id);
        flush |= MDP5_CTL_FLUSH_CTL;

        DBG("%s: flush=%08x", mdp5_crtc->name, flush);

        mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
}

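/* Swap in new_fb as the fb we logically (from PoV of KMS API) hold a
 * ref to, queueing the old fb for unref once scanout has moved on:
 */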
static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_framebuffer *old_fb = mdp5_crtc->fb;

        /* grab reference to incoming scanout fb: */
        drm_framebuffer_reference(new_fb);
        mdp5_crtc->base.fb = new_fb;
        mdp5_crtc->fb = new_fb;

        if (old_fb)
                drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
}

/* unlike update_fb(), take a ref to the new scanout fb *before* updating
 * plane, then call this.  Needed to ensure we don't unref the buffer that
 * is actually still being scanned out.
 *
 * Note that this whole thing goes away with atomic.. since we can defer
 * calling into driver until rendering is done.
 */
static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        /* flush updates, to make sure hw is updated to new scanout fb,
         * so that we can safely queue unref to current fb (ie. next
         * vblank we know hw is done w/ previous scanout_fb).
         */
        crtc_flush(crtc);

        if (mdp5_crtc->scanout_fb)
                drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
                                mdp5_crtc->scanout_fb);

        mdp5_crtc->scanout_fb = fb;

        /* enable vblank to complete flip: */
        request_pending(crtc, PENDING_FLIP);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_pending_vblank_event *event;
        unsigned long flags, i;

        spin_lock_irqsave(&dev->event_lock, flags);
        event = mdp5_crtc->event;
        if (event) {
                /* if regular vblank case (!file) or if cancel-flip from
                 * preclose on file that requested flip, then send the
                 * event:
                 */
                if (!file || (event->base.file_priv == file)) {
                        mdp5_crtc->event = NULL;
                        drm_send_vblank_event(dev, mdp5_crtc->id, event);
                }
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);

        for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
                struct drm_plane *plane = mdp5_crtc->planes[i];
                if (plane)
                        mdp5_plane_complete_flip(plane);
        }
}

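/* Fence callback, run once gpu rendering to mdp5_crtc->fb has
 * completed: now it is safe to push the new fb to the plane and flip
 * scanout over to it.
 */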
static void pageflip_cb(struct msm_fence_cb *cb)
{
        struct mdp5_crtc *mdp5_crtc =
                container_of(cb, struct mdp5_crtc, pageflip_cb);
        struct drm_crtc *crtc = &mdp5_crtc->base;
        struct drm_framebuffer *fb = mdp5_crtc->fb;

        if (!fb)
                return;

        drm_framebuffer_reference(fb);
        mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
        update_scanout(crtc, fb);
}

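/* flip_work worker: drops fb references from process context, where it
 * is safe to take mode_config.mutex (this cannot be done directly from
 * the vblank irq that queues the work):
 */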
static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
        struct mdp5_crtc *mdp5_crtc =
                container_of(work, struct mdp5_crtc, unref_fb_work);
        struct drm_device *dev = mdp5_crtc->base.dev;

        mutex_lock(&dev->mode_config.mutex);
        drm_framebuffer_unreference(val);
        mutex_unlock(&dev->mode_config.mutex);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        mdp5_crtc->plane->funcs->destroy(mdp5_crtc->plane);

        drm_crtc_cleanup(crtc);
        drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);

        kfree(mdp5_crtc);
}

static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        bool enabled = (mode == DRM_MODE_DPMS_ON);

        DBG("%s: mode=%d", mdp5_crtc->name, mode);

        if (enabled != mdp5_crtc->enabled) {
                if (enabled) {
                        mdp5_enable(mdp5_kms);
                        mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
                } else {
                        mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
                        mdp5_disable(mdp5_kms);
                }
                mdp5_crtc->enabled = enabled;
        }
}

static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
                const struct drm_display_mode *mode,
                struct drm_display_mode *adjusted_mode)
{
        return true;
}

static void blend_setup(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        int id = mdp5_crtc->id;

        /*
         * Hard-coded setup for now until I figure out how the
         * layer-mixer works
         */

        /* LM[id]: */
        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
                        MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
                        MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
                        MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
                        MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);

        /* NOTE: seems that for LM[n] and CTL[m] we do not need n==m.. but
         * we want to be setting CTL[m].LAYER[n].  Not sure what the point
         * of having CTL[m].LAYER[o] (for o!=n) is.. maybe that is used
         * when chaining up mixers for high resolution displays?
         */

        /* CTL[id]: */
        mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
                        MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
                        MDP5_CTL_LAYER_REG_BORDER_COLOR);
        mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
        mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
        mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
        mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
}

static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
                struct drm_display_mode *mode,
                struct drm_display_mode *adjusted_mode,
                int x, int y,
                struct drm_framebuffer *old_fb)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        int ret;

        mode = adjusted_mode;

        DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
                        mdp5_crtc->name, mode->base.id, mode->name,
                        mode->vrefresh, mode->clock,
                        mode->hdisplay, mode->hsync_start,
                        mode->hsync_end, mode->htotal,
                        mode->vdisplay, mode->vsync_start,
                        mode->vsync_end, mode->vtotal,
                        mode->type, mode->flags);

        /* grab extra ref for update_scanout() */
        drm_framebuffer_reference(crtc->fb);

        ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->fb,
                        0, 0, mode->hdisplay, mode->vdisplay,
                        x << 16, y << 16,
                        mode->hdisplay << 16, mode->vdisplay << 16);
        if (ret) {
                drm_framebuffer_unreference(crtc->fb);
                dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
                                mdp5_crtc->name, ret);
                return ret;
        }

        mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
                        MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
                        MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

        update_fb(crtc, crtc->fb);
        update_scanout(crtc, crtc->fb);

        return 0;
}

static void mdp5_crtc_prepare(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        DBG("%s", mdp5_crtc->name);
        /* make sure we hold a ref to mdp clks while setting up mode: */
        mdp5_enable(get_kms(crtc));
        mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void mdp5_crtc_commit(struct drm_crtc *crtc)
{
        mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
        crtc_flush(crtc);
        /* drop the ref to mdp clk's that we got in prepare: */
        mdp5_disable(get_kms(crtc));
}

static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
                struct drm_framebuffer *old_fb)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_plane *plane = mdp5_crtc->plane;
        struct drm_display_mode *mode = &crtc->mode;
        int ret;

        /* grab extra ref for update_scanout() */
        drm_framebuffer_reference(crtc->fb);

        ret = mdp5_plane_mode_set(plane, crtc, crtc->fb,
                        0, 0, mode->hdisplay, mode->vdisplay,
                        x << 16, y << 16,
                        mode->hdisplay << 16, mode->vdisplay << 16);
        if (ret) {
                drm_framebuffer_unreference(crtc->fb);
                return ret;
        }

        update_fb(crtc, crtc->fb);
        update_scanout(crtc, crtc->fb);

        return 0;
}

static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
{
}

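/* The scanout update is deferred: we just record the event and the new
 * fb here, and let pageflip_cb() do the actual flip once the gem bo
 * backing new_fb becomes inactive (ie. gpu rendering has completed):
 */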
static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
                struct drm_framebuffer *new_fb,
                struct drm_pending_vblank_event *event,
                uint32_t page_flip_flags)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_gem_object *obj;
        unsigned long flags;

        if (mdp5_crtc->event) {
                dev_err(dev->dev, "already pending flip!\n");
                return -EBUSY;
        }

        obj = msm_framebuffer_bo(new_fb, 0);

        spin_lock_irqsave(&dev->event_lock, flags);
        mdp5_crtc->event = event;
        spin_unlock_irqrestore(&dev->event_lock, flags);

        update_fb(crtc, new_fb);

        return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
}

static int mdp5_crtc_set_property(struct drm_crtc *crtc,
                struct drm_property *property, uint64_t val)
{
        // XXX
        return -EINVAL;
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
        .set_config = drm_crtc_helper_set_config,
        .destroy = mdp5_crtc_destroy,
        .page_flip = mdp5_crtc_page_flip,
        .set_property = mdp5_crtc_set_property,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
        .dpms = mdp5_crtc_dpms,
        .mode_fixup = mdp5_crtc_mode_fixup,
        .mode_set = mdp5_crtc_mode_set,
        .prepare = mdp5_crtc_prepare,
        .commit = mdp5_crtc_commit,
        .mode_set_base = mdp5_crtc_mode_set_base,
        .load_lut = mdp5_crtc_load_lut,
};

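/* vblank irq handler: by now the hw has picked up the flushed updates,
 * so any pending flip can be completed and the queued fb unrefs
 * committed to the workqueue:
 */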
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
        struct drm_crtc *crtc = &mdp5_crtc->base;
        struct msm_drm_private *priv = crtc->dev->dev_private;
        unsigned pending;

        mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

        pending = atomic_xchg(&mdp5_crtc->pending, 0);

        if (pending & PENDING_FLIP) {
                complete_flip(crtc, NULL);
                drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
        }
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
        struct drm_crtc *crtc = &mdp5_crtc->base;
        DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
        crtc_flush(crtc);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
        DBG("cancel: %p", file);
        complete_flip(crtc, file);
}

/* set interface for routing crtc->encoder: */
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
                enum mdp5_intf intf_id)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        static const enum mdp5_intfnum intfnum[] = {
                        INTF0, INTF1, INTF2, INTF3,
        };
        uint32_t intf_sel;

        /* now that we know what irq's we want: */
        mdp5_crtc->err.irqmask = intf2err(intf);
        mdp5_crtc->vblank.irqmask = intf2vblank(intf);

        /* when called from modeset_init(), skip the rest until later: */
        if (!mdp5_kms)
                return;

        intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

        switch (intf) {
        case 0:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
                break;
        case 1:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
                break;
        case 2:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
                break;
        case 3:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
                break;
        default:
                BUG();
                break;
        }

        blend_setup(crtc);

        DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);

        mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
        mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
                        MDP5_CTL_OP_MODE(MODE_NONE) |
                        MDP5_CTL_OP_INTF_NUM(intfnum[intf]));

        crtc_flush(crtc);
}

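/* Update the table of planes attached to this crtc and re-program the
 * blend setup.  Overlay planes are flushed immediately when the crtc
 * is enabled; the primary plane is instead flushed via the modeset and
 * pageflip paths:
 */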
static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
                struct drm_plane *plane)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));

        if (mdp5_crtc->planes[pipe_id] == plane)
                return;

        mdp5_crtc->planes[pipe_id] = plane;
        blend_setup(crtc);
        if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
                crtc_flush(crtc);
}

void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
{
        set_attach(crtc, mdp5_plane_pipe(plane), plane);
}

void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
{
        set_attach(crtc, mdp5_plane_pipe(plane), NULL);
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
                struct drm_plane *plane, int id)
{
        struct drm_crtc *crtc = NULL;
        struct mdp5_crtc *mdp5_crtc;
        int ret;

        mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
        if (!mdp5_crtc) {
                ret = -ENOMEM;
                goto fail;
        }

        crtc = &mdp5_crtc->base;

        mdp5_crtc->plane = plane;
        mdp5_crtc->id = id;

        mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
        mdp5_crtc->err.irq = mdp5_crtc_err_irq;

        snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
                        pipe2name(mdp5_plane_pipe(plane)), id);

        ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
                        "unref fb", unref_fb_worker);
        if (ret)
                goto fail;

        INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);

        drm_crtc_init(dev, crtc, &mdp5_crtc_funcs);
        drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);

        mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);

        return crtc;

fail:
        if (crtc)
                mdp5_crtc_destroy(crtc);

        return ERR_PTR(ret);
}