mdp5_crtc.c
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"

#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

struct mdp5_crtc {
	struct drm_crtc base;
	char name[8];
	struct drm_plane *plane;
	struct drm_plane *planes[8];
	int id;
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;
	struct msm_fence_cb pageflip_cb;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* the fb that we logically (from PoV of KMS API) hold a ref
	 * to.  Which we may not yet be scanning out (we may still be
	 * scanning out the previous fb in case of page_flip, while
	 * waiting for gpu rendering to complete):
	 */
	struct drm_framebuffer *fb;

	/* the fb that we currently hold a scanout ref to: */
	struct drm_framebuffer *scanout_fb;

	/* for unref'ing framebuffers after scanout completes: */
	struct drm_flip_work unref_fb_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}
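
/* Record a pending flip/cursor update and arm the vblank irq; the
 * pending bits are consumed in mdp5_crtc_vblank_irq() at the next
 * vblank, once the hw is done with the previous scanout state:
 */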
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
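
/* Build the CTL flush mask from the attached pipes plus our mixer, and
 * write it so the hw latches the updated (shadow) registers.  (The
 * double-buffering behaviour is assumed from the flush-register naming;
 * the bit encodings come from pipe2flush()/mixer2flush().)
 */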
static void crtc_flush(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	int id = mdp5_crtc->id;
	uint32_t i, flush = 0;

	for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
		struct drm_plane *plane = mdp5_crtc->planes[i];
		if (plane) {
			enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
			flush |= pipe2flush(pipe);
		}
	}
	flush |= mixer2flush(mdp5_crtc->id);
	flush |= MDP5_CTL_FLUSH_CTL;

	DBG("%s: flush=%08x", mdp5_crtc->name, flush);

	mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
}
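
/* Swap in the new logical fb (the one that, from the KMS PoV, we are
 * scanning out), deferring the unref of the old one to the flip-work:
 */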
static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_framebuffer *old_fb = mdp5_crtc->fb;

	/* grab reference to incoming scanout fb: */
	drm_framebuffer_reference(new_fb);
	mdp5_crtc->base.primary->fb = new_fb;
	mdp5_crtc->fb = new_fb;

	if (old_fb)
		drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb);
}

/* unlike update_fb(), take a ref to the new scanout fb *before* updating
 * plane, then call this.  Needed to ensure we don't unref the buffer that
 * is actually still being scanned out.
 *
 * Note that this whole thing goes away with atomic.. since we can defer
 * calling into driver until rendering is done.
 */
static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	/* flush updates, to make sure hw is updated to new scanout fb,
	 * so that we can safely queue unref to current fb (ie. next
	 * vblank we know hw is done w/ previous scanout_fb).
	 */
	crtc_flush(crtc);

	if (mdp5_crtc->scanout_fb)
		drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
				mdp5_crtc->scanout_fb);

	mdp5_crtc->scanout_fb = fb;

	/* enable vblank to complete flip: */
	request_pending(crtc, PENDING_FLIP);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags, i;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on file that requested flip, then send the
		 * event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp5_crtc->event = NULL;
			drm_send_vblank_event(dev, mdp5_crtc->id, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
		struct drm_plane *plane = mdp5_crtc->planes[i];
		if (plane)
			mdp5_plane_complete_flip(plane);
	}
}
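
/* Fence callback, run once gpu rendering into the new fb has completed
 * (queued from mdp5_crtc_page_flip() via msm_gem_queue_inactive_cb()):
 * take the scanout ref and actually point the plane at the new fb.
 */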
static void pageflip_cb(struct msm_fence_cb *cb)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(cb, struct mdp5_crtc, pageflip_cb);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct drm_framebuffer *fb = mdp5_crtc->fb;

	if (!fb)
		return;

	drm_framebuffer_reference(fb);
	mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
	update_scanout(crtc, fb);
}
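
/* Runs on the driver wq after vblank: drop the fb references that were
 * deferred by update_fb()/update_scanout(), now that the hw is done
 * scanning out of those buffers.
 */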
static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_fb_work);
	struct drm_device *dev = mdp5_crtc->base.dev;

	mutex_lock(&dev->mode_config.mutex);
	drm_framebuffer_unreference(val);
	mutex_unlock(&dev->mode_config.mutex);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);

	kfree(mdp5_crtc);
}

static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	bool enabled = (mode == DRM_MODE_DPMS_ON);

	DBG("%s: mode=%d", mdp5_crtc->name, mode);

	if (enabled != mdp5_crtc->enabled) {
		if (enabled) {
			mdp5_enable(mdp5_kms);
			mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
		} else {
			mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
			mdp5_disable(mdp5_kms);
		}
		mdp5_crtc->enabled = enabled;
	}
}
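
/* no hw-specific constraints on the mode, so accept it as-is: */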
static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}

static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	int id = mdp5_crtc->id;

	/*
	 * Hard-coded setup for now until I figure out how the
	 * layer-mixer works:
	 */

	/* LM[id]: */
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
			MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
			MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
			MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);

	/* NOTE: it seems that for LM[n] and CTL[m] we do not need n==m..
	 * but we want to be setting CTL[m].LAYER[n].  Not sure what the
	 * point of having CTL[m].LAYER[o] (for o!=n) is.. maybe that is
	 * used when chaining up mixers for high resolution displays?
	 */

	/* CTL[id]: */
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
			MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
			MDP5_CTL_LAYER_REG_BORDER_COLOR);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
}
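
/* Full modeset: program the primary plane to cover the whole mode and
 * set the layer-mixer output size.  The extra fb ref taken up front is
 * for update_scanout(), and is dropped again on error:
 */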
static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
		struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode,
		int x, int y,
		struct drm_framebuffer *old_fb)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	int ret;

	mode = adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp5_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	/* grab extra ref for update_scanout() */
	drm_framebuffer_reference(crtc->primary->fb);

	ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->primary->fb,
			0, 0, mode->hdisplay, mode->vdisplay,
			x << 16, y << 16,
			mode->hdisplay << 16, mode->vdisplay << 16);
	if (ret) {
		drm_framebuffer_unreference(crtc->primary->fb);
		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
				mdp5_crtc->name, ret);
		return ret;
	}

	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	update_fb(crtc, crtc->primary->fb);
	update_scanout(crtc, crtc->primary->fb);

	return 0;
}

static void mdp5_crtc_prepare(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	DBG("%s", mdp5_crtc->name);
	/* make sure we hold a ref to mdp clks while setting up mode: */
	mdp5_enable(get_kms(crtc));
	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void mdp5_crtc_commit(struct drm_crtc *crtc)
{
	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	crtc_flush(crtc);
	/* drop the ref to mdp clk's that we got in prepare: */
	mdp5_disable(get_kms(crtc));
}
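
/* like ->mode_set(), but only the fb and scanout position change: */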
static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
		struct drm_framebuffer *old_fb)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_plane *plane = mdp5_crtc->plane;
	struct drm_display_mode *mode = &crtc->mode;
	int ret;

	/* grab extra ref for update_scanout() */
	drm_framebuffer_reference(crtc->primary->fb);

	ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb,
			0, 0, mode->hdisplay, mode->vdisplay,
			x << 16, y << 16,
			mode->hdisplay << 16, mode->vdisplay << 16);
	if (ret) {
		drm_framebuffer_unreference(crtc->primary->fb);
		return ret;
	}

	update_fb(crtc, crtc->primary->fb);
	update_scanout(crtc, crtc->primary->fb);

	return 0;
}
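
/* gamma/LUT programming is not supported; an empty stub is provided
 * since the legacy crtc helpers appear to expect this hook to exist:
 */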
static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
{
}
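
/* Queue a page flip.  The scanout is not switched immediately; instead
 * update_fb() takes the logical fb ref and pageflip_cb() performs the
 * actual switch once the gpu is done rendering into the new fb's bo.
 */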
static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
		struct drm_framebuffer *new_fb,
		struct drm_pending_vblank_event *event,
		uint32_t page_flip_flags)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *obj;
	unsigned long flags;

	if (mdp5_crtc->event) {
		dev_err(dev->dev, "already pending flip!\n");
		return -EBUSY;
	}

	obj = msm_framebuffer_bo(new_fb, 0);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	update_fb(crtc, new_fb);

	return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
}

static int mdp5_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	// XXX
	return -EINVAL;
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_crtc_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = mdp5_crtc_page_flip,
	.set_property = mdp5_crtc_set_property,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.dpms = mdp5_crtc_dpms,
	.mode_fixup = mdp5_crtc_mode_fixup,
	.mode_set = mdp5_crtc_mode_set,
	.prepare = mdp5_crtc_prepare,
	.commit = mdp5_crtc_commit,
	.mode_set_base = mdp5_crtc_mode_set_base,
	.load_lut = mdp5_crtc_load_lut,
};
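
/* vblank irq: the hw has latched the flushed state, so complete any
 * pending flip and kick off the deferred fb-unref work:
 */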
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
		drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
	}
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
	crtc_flush(crtc);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	DBG("cancel: %p", file);
	complete_flip(crtc, file);
}

/* set interface for routing crtc->encoder: */
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
		enum mdp5_intf intf_id)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	static const enum mdp5_intfnum intfnum[] = {
			INTF0, INTF1, INTF2, INTF3,
	};
	uint32_t intf_sel;

	/* now that we know what irq's we want: */
	mdp5_crtc->err.irqmask = intf2err(intf);
	mdp5_crtc->vblank.irqmask = intf2vblank(intf);

	/* when called from modeset_init(), skip the rest until later: */
	if (!mdp5_kms)
		return;

	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

	switch (intf) {
	case 0:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
		break;
	case 1:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
		break;
	case 2:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
		break;
	case 3:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
		break;
	default:
		BUG();
		break;
	}

	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
			MDP5_CTL_OP_MODE(MODE_NONE) |
			MDP5_CTL_OP_INTF_NUM(intfnum[intf]));

	crtc_flush(crtc);
}
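
/* Track which plane is attached at which pipe slot and re-program the
 * mixer accordingly; flush immediately only if the crtc is enabled and
 * the change did not come from our own primary plane:
 */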
static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
		struct drm_plane *plane)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes));

	if (mdp5_crtc->planes[pipe_id] == plane)
		return;

	mdp5_crtc->planes[pipe_id] = plane;
	blend_setup(crtc);
	if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
		crtc_flush(crtc);
}

void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
{
	set_attach(crtc, mdp5_plane_pipe(plane), plane);
}

void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
{
	/* don't actually detach our primary plane: */
	if (to_mdp5_crtc(crtc)->plane == plane)
		return;
	set_attach(crtc, mdp5_plane_pipe(plane), NULL);
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;
	int ret;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc) {
		ret = -ENOMEM;
		goto fail;
	}

	crtc = &mdp5_crtc->base;

	mdp5_crtc->plane = plane;
	mdp5_crtc->id = id;

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;

	snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
			pipe2name(mdp5_plane_pipe(plane)), id);

	ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
			"unref fb", unref_fb_worker);
	if (ret)
		goto fail;

	INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);

	mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base);

	return crtc;

fail:
	if (crtc)
		mdp5_crtc_destroy(crtc);

	return ERR_PTR(ret);
}