mdp5_crtc.c

/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/sort.h>

#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>

#include "mdp5_kms.h"

#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

struct mdp5_crtc {
	struct drm_crtc base;
	int id;
	bool enabled;

	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits have been flushed at the last commit,
	 * used to decide if a vsync has happened since last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	struct completion pp_completion;

	bool lm_cursor_enabled;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint64_t iova;
		uint32_t width, height;
		uint32_t x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

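/*
 * Record pending work (cursor unref and/or page-flip completion) and arm the
 * vblank irq; the bits are consumed in mdp5_crtc_vblank_irq() on the next
 * vblank.
 */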
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

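/*
 * Re-arm the ping-pong done completion, so that a later
 * mdp5_crtc_wait_for_pp_done() only succeeds on a PP_DONE irq that arrives
 * after this commit.
 */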
static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	reinit_completion(&mdp5_crtc->pp_completion);
}

static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;

	DBG("%s: flush=%08x", crtc->name, flush_mask);

	return mdp5_ctl_commit(ctl, pipeline, flush_mask);
}

/*
 * flush updates, to make sure hw is updated to new scanout fb,
 * so that we can safely queue unref to current fb (ie. next
 * vblank we know hw is done w/ previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_hw_mixer *mixer, *r_mixer;
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* this should not happen: */
	if (WARN_ON(!mdp5_cstate->ctl))
		return 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (!plane->state->visible)
			continue;
		flush_mask |= mdp5_plane_get_flush(plane);
	}

	mixer = mdp5_cstate->pipeline.mixer;
	flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);

	r_mixer = mdp5_cstate->pipeline.r_mixer;
	if (r_mixer)
		flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	return crtc_flush(crtc, flush_mask);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		mdp5_crtc->event = NULL;
		DBG("%s: send event: %p", crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
		/* XXX: What to do here? */
		/* mdp5_crtc->ctl = NULL; */
	}
}

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
	struct msm_kms *kms = &mdp5_kms->base.base;

	msm_gem_put_iova(val, kms->aspace);
	drm_gem_object_unreference_unlocked(val);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}

static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
	switch (stage) {
	case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
	case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
	case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
	case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
	case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
	case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
	case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
	default:
		return 0;
	}
}

/*
 * left/right pipe offsets for the stage array used in blend_setup()
 */
#define PIPE_LEFT	0
#define PIPE_RIGHT	1

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	uint32_t lm = mixer->lm;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	int i, plane_cnt = 0;
	bool bg_alpha_enabled = false;
	u32 mixer_op_mode = 0;
	u32 val;
#define blender(stage)	((stage) - STAGE0)

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	/* XXX: Can this happen now? */
	if (!ctl)
		goto out;

	/* Collect all plane information */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp5_pipe right_pipe;

		if (!plane->state->visible)
			continue;

		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
		/*
		 * if we have a right mixer, stage the same pipe as we
		 * have on the left mixer
		 */
		if (r_mixer)
			r_stage[pstate->stage][PIPE_LEFT] =
				mdp5_plane_pipe(plane);
		/*
		 * if we have a right pipe (i.e. the plane consists of 2
		 * hwpipes), then stage the right pipe on the right side of
		 * both the layer mixers
		 */
		right_pipe = mdp5_plane_right_pipe(plane);
		if (right_pipe) {
			stage[pstate->stage][PIPE_RIGHT] = right_pipe;
			r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
		}

		plane_cnt++;
	}

	if (!pstates[STAGE_BASE]) {
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	} else if (plane_cnt) {
		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

		if (format->alpha_enable)
			bg_alpha_enabled = true;
	}

	/* The reset for blending */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->alpha;
		bg_alpha = 0xFF - pstates[i]->alpha;

		if (!format->alpha_enable && bg_alpha_enabled)
			mixer_op_mode = 0;
		else
			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

		if (format->alpha_enable && pstates[i]->premultiplied) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}

		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
		if (r_mixer) {
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
					blender(i)), blend_op);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
					blender(i)), fg_alpha);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
					blender(i)), bg_alpha);
		}
	}

	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
		   val | mixer_op_mode);
	if (r_mixer) {
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
			   val | mixer_op_mode);
	}

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
		       ctl_blend_flags);
out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

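/*
 * Program the layer mixer output size from the adjusted mode. When a right
 * mixer is in use (source split), each LM covers half of hdisplay and is
 * tagged as the LEFT/RIGHT half via LM_BLEND_COLOR_OUT.
 */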
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
	uint32_t lm = mixer->lm;
	u32 mixer_width, val;
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
	    crtc->name, mode->base.id, mode->name,
	    mode->vrefresh, mode->clock,
	    mode->hdisplay, mode->hsync_start,
	    mode->hsync_end, mode->htotal,
	    mode->vdisplay, mode->vsync_start,
	    mode->vsync_end, mode->vtotal,
	    mode->type, mode->flags);

	mixer_width = mode->hdisplay;
	if (r_mixer)
		mixer_width /= 2;

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
		   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
		   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	/* Assign mixer to LEFT side in source split mode */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

	if (r_mixer) {
		u32 r_lm = r_mixer->lm;

		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
			   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

		/* Assign mixer to RIGHT side in source split mode */
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
	}

	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;

	DBG("%s", crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	pm_runtime_put_sync(dev);

	mdp5_crtc->enabled = false;
}

static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;

	DBG("%s", crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	pm_runtime_get_sync(dev);

	if (mdp5_crtc->lm_cursor_enabled) {
		/*
		 * Restore LM cursor state, as it might have been lost
		 * with suspend:
		 */
		if (mdp5_crtc->cursor.iova) {
			unsigned long flags;

			spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
			mdp5_crtc_restore_cursor(crtc);
			spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, true);
		} else {
			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, false);
		}
	}

	/* Restore vblank irq handling after power is enabled */
	drm_crtc_vblank_on(crtc);

	mdp5_crtc_mode_set_nofb(crtc);

	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}

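/*
 * (Re)assign the hardware mixer(s) for this CRTC when the left/right mixer
 * requirements change, releasing any previously held mixers, and derive the
 * error/vblank/pp_done irq masks and cmd_mode from the interface that the
 * encoder's atomic check already attached to the pipeline.
 */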
int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_crtc_state,
			     bool need_right_mixer)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_interface *intf;
	bool new_mixer = false;

	new_mixer = !pipeline->mixer;

	if ((need_right_mixer && !pipeline->r_mixer) ||
	    (!need_right_mixer && pipeline->r_mixer))
		new_mixer = true;

	if (new_mixer) {
		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
		struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
		u32 caps;
		int ret;

		caps = MDP_LM_CAP_DISPLAY;
		if (need_right_mixer)
			caps |= MDP_LM_CAP_PAIR;

		ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
					&pipeline->mixer, need_right_mixer ?
					&pipeline->r_mixer : NULL);
		if (ret)
			return ret;

		mdp5_mixer_release(new_crtc_state->state, old_mixer);
		if (old_r_mixer) {
			mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
			if (!need_right_mixer)
				pipeline->r_mixer = NULL;
		}
	}

	/*
	 * these should have been already set up in the encoder's atomic
	 * check (called by drm_atomic_helper_check_modeset)
	 */
	intf = pipeline->intf;

	mdp5_cstate->err_irqmask = intf2err(intf->num);
	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

	if ((intf->type == INTF_DSI) &&
	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
		mdp5_cstate->cmd_mode = true;
	} else {
		mdp5_cstate->pp_done_irqmask = 0;
		mdp5_cstate->cmd_mode = false;
	}

	return 0;
}

struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
}

/* is there a helper for this? */
static bool is_fullscreen(struct drm_crtc_state *cstate,
			  struct drm_plane_state *pstate)
{
	return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
	       ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
	       ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}

static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
					       struct drm_crtc_state *new_crtc_state,
					       struct drm_plane_state *bpstate)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);

	/*
	 * if we're in source split mode, it's mandatory to have
	 * border out on the base stage
	 */
	if (mdp5_cstate->pipeline.r_mixer)
		return STAGE0;

	/* if the bottom-most layer is not fullscreen, we need to keep the
	 * base stage for solid-color (border out):
	 */
	if (!is_fullscreen(new_crtc_state, bpstate))
		return STAGE0;

	return STAGE_BASE;
}

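/*
 * Validate the new CRTC state: collect the visible planes, decide whether a
 * right mixer is needed (a plane using two hwpipes, or a mode wider than a
 * single LM), set up the pipeline, then sort the planes by zpos and assign
 * mixer stages, rejecting the state if more stages are needed than the
 * hardware provides.
 */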
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	const struct drm_plane_state *pstate;
	const struct drm_display_mode *mode = &state->adjusted_mode;
	bool cursor_plane = false;
	bool need_right_mixer = false;
	int cnt = 0, i;
	int ret;
	enum mdp_mixer_stage_id start;

	DBG("%s: check", crtc->name);

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		if (!pstate->visible)
			continue;

		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		/*
		 * if any plane on this crtc uses 2 hwpipes, then we need
		 * the crtc to have a right hwmixer.
		 */
		if (pstates[cnt].state->r_hwpipe)
			need_right_mixer = true;
		cnt++;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_plane = true;
	}

	/* bail out early if there aren't any planes */
	if (!cnt)
		return 0;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/*
	 * we need a right hwmixer if the mode's width is greater than a single
	 * LM's max width
	 */
	if (mode->hdisplay > hw_cfg->lm.max_width)
		need_right_mixer = true;

	ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
	if (ret) {
		dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
		return ret;
	}

	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	/* trigger a warning if cursor isn't the highest zorder */
	WARN_ON(cursor_plane &&
		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

	start = get_start_stage(crtc, state, &pstates[0].state->base);

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
		dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
			cnt, start);
		return -EINVAL;
	}

	for (i = 0; i < cnt; i++) {
		if (cursor_plane && (i == (cnt - 1)))
			pstates[i].state->stage = hw_cfg->lm.nb_stages;
		else
			pstates[i].state->stage = start + i;

		DBG("%s: assign pipe %s on stage=%d", crtc->name,
		    pstates[i].plane->name,
		    pstates[i].state->stage);
	}

	return 0;
}

static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	DBG("%s: begin", crtc->name);
}

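/*
 * Commit the new state to hardware: stash the page-flip event, program the
 * blend configuration, flush all updated blocks, then remember the flush
 * mask and the irq masks for this commit so the vblank/err/pp_done handlers
 * act on the right interface.
 */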
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	/* XXX: Can this happen now ? */
	if (unlikely(!mdp5_cstate->ctl))
		return;

	blend_setup(crtc);

	/* PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before the FLUSH and START triggers,
	 * to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before the SW trigger
	 * in command mode.
	 */
	if (mdp5_cstate->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	/* XXX are we leaking out state here? */
	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

	request_pending(crtc, PENDING_FLIP);
}

static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * Cursor Region Of Interest (ROI) is the portion of the cursor
	 * buffer that is read and rendered. The ROI region is determined by
	 * the visibility of the cursor point. In the default cursor image
	 * the cursor point will be at the top left of the cursor image,
	 * unless it is specified otherwise using the hotspot feature.
	 *
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly.
	 * (xres-x) will be the new cursor width when x > (xres - cursor.width)
	 * (yres-y) will be the new cursor height when y > (yres - cursor.height)
	 */
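	/*
	 * For example (illustrative numbers): a 64x64 cursor at x = 1890 on
	 * a 1920-wide mode gives roi_w = min(64, 1920 - 1890) = 30, so only
	 * the left 30 pixel columns of the cursor image are scanned out.
	 */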
	*roi_w = min(mdp5_crtc->cursor.width, xres -
		     mdp5_crtc->cursor.x);
	*roi_h = min(mdp5_crtc->cursor.height, yres -
		     mdp5_crtc->cursor.y);
}

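/*
 * Program the layer mixer cursor registers (stride, format, image size, ROI,
 * start position, base address and blend config) from the cached cursor
 * state. Callers must hold mdp5_crtc->cursor.lock.
 */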
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t blendcfg, stride;
	uint32_t x, y, width, height;
	uint32_t roi_w, roi_h;
	int lm;

	assert_spin_locked(&mdp5_crtc->cursor.lock);

	lm = mdp5_cstate->pipeline.mixer->lm;

	x = mdp5_crtc->cursor.x;
	y = mdp5_crtc->cursor.y;
	width = mdp5_crtc->cursor.width;
	height = mdp5_crtc->cursor.height;

	stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);

	get_roi(crtc, &roi_w, &roi_h);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
		   MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
		   MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
		   MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
		   MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
		   MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
		   MDP5_LM_CURSOR_START_XY_Y_START(y) |
		   MDP5_LM_CURSOR_START_XY_X_START(x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
		   mdp5_crtc->cursor.iova);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
}

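/*
 * Legacy cursor ioctl path, used only when the layer mixer cursor is in use
 * (i.e. no dedicated cursor plane was created): pin the new cursor BO, update
 * the cached cursor state, reprogram the LM cursor and flush, then queue the
 * old BO for unref once scanout has moved on.
 */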
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
				struct drm_file *file, uint32_t handle,
				uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct platform_device *pdev = mdp5_kms->pdev;
	struct msm_kms *kms = &mdp5_kms->base.base;
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	struct mdp5_ctl *ctl;
	int ret;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	bool cursor_enable = true;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_set is deprecated with cursor planes\n");
		return -EINVAL;
	}

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	ctl = mdp5_cstate->ctl;
	if (!ctl)
		return -EINVAL;

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		mdp5_crtc->cursor.iova = 0;
		pm_runtime_get_sync(&pdev->dev);
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_iova(cursor_bo, kms->aspace,
			       &mdp5_crtc->cursor.iova);
	if (ret)
		return -EINVAL;

	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	mdp5_crtc_restore_cursor(crtc);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
	if (ret) {
		dev_err(dev->dev, "failed to %sable cursor: %d\n",
			cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	pm_runtime_put_sync(&pdev->dev);
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}
	return ret;
}

static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	struct drm_device *dev = crtc->dev;
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_move is deprecated with cursor planes\n");
		return -EINVAL;
	}

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	mdp5_crtc->cursor.x = x = max(x, 0);
	mdp5_crtc->cursor.y = y = max(y, 0);

	get_roi(crtc, &roi_w, &roi_h);

	pm_runtime_get_sync(&mdp5_kms->pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_crtc_restore_cursor(crtc);
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	pm_runtime_put_sync(&mdp5_kms->pdev->dev);

	return 0;
}

static void
mdp5_crtc_atomic_print_state(struct drm_printer *p,
			     const struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(state->crtc);

	if (WARN_ON(!pipeline))
		return;

	drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
		   pipeline->mixer->name : "(null)");

	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
			   pipeline->r_mixer->name : "(null)");
}

static void mdp5_crtc_reset(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
		kfree(to_mdp5_crtc_state(crtc->state));
	}

	mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);

	if (mdp5_cstate) {
		mdp5_cstate->base.crtc = crtc;
		crtc->state = &mdp5_cstate->base;
	}
}

static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc->state))
		return NULL;

	mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
			      sizeof(*mdp5_cstate), GFP_KERNEL);
	if (!mdp5_cstate)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);

	return &mdp5_cstate->base;
}

static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(mdp5_cstate);
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
	.atomic_enable = mdp5_crtc_atomic_enable,
	.atomic_disable = mdp5_crtc_atomic_disable,
};

static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}

static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
								pp_done);

	complete(&mdp5_crtc->pp_completion);
}

static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
					  msecs_to_jiffies(50));
	if (ret == 0)
		dev_warn(dev->dev, "pp done time out, lm=%d\n",
			 mdp5_cstate->pipeline.mixer->lm);
}

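/*
 * Wait (up to 50 ms, on the vblank wait queue) until the bits flushed by the
 * last commit have been cleared from the CTL flush status, i.e. the hardware
 * has latched the new configuration.
 */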
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!ctl)
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(ctl) &
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	/* should this be done elsewhere ? */
	mdp_irq_update(&mdp5_kms->base);

	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}

struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return mdp5_cstate->ctl;
}

struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
		ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
}

struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return &mdp5_cstate->pipeline;
}

void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	if (mdp5_cstate->cmd_mode)
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
				struct drm_plane *plane,
				struct drm_plane *cursor_plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);
	init_completion(&mdp5_crtc->pp_completion);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
	mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;

	mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;

	drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
				  &mdp5_crtc_funcs, NULL);

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			   "unref cursor", unref_cursor_worker);

	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
	plane->crtc = crtc;

	return crtc;
}