mdp4_crtc.c

/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp4_kms.h"

#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

struct mdp4_crtc {
	struct drm_crtc base;
	char name[8];
	struct drm_plane *plane;
	struct drm_plane *planes[8];
	int id;
	int ovlp;
	enum mdp4_dma dma;
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	struct {
		spinlock_t lock;
		bool stale;
		uint32_t width, height;
		uint32_t x, y;

		/* next cursor to scan-out: */
		uint32_t next_iova;
		struct drm_gem_object *next_bo;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
	} cursor;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;
	struct msm_fence_cb pageflip_cb;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* the fb that we logically (from PoV of KMS API) hold a ref
	 * to.  We may not yet be scanning it out (we may still be
	 * scanning out the previous fb in case of a page_flip that is
	 * waiting for gpu rendering to complete):
	 */
	struct drm_framebuffer *fb;

	/* the fb that we currently hold a scanout ref to: */
	struct drm_framebuffer *scanout_fb;

	/* for unref'ing framebuffers after scanout completes: */
	struct drm_flip_work unref_fb_work;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)

static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp4_kms(to_mdp_kms(priv->kms));
}

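/* mark flip/cursor work as pending and register for the vblank irq,
 * so the update is completed on the next vblank:
 */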
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	atomic_or(pending, &mdp4_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
}

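/* write the flush bits for all attached pipes plus this crtc's overlay,
 * so the hw latches any staged register updates:
 */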
static void crtc_flush(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t i, flush = 0;

	for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
		struct drm_plane *plane = mdp4_crtc->planes[i];
		if (plane) {
			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
			flush |= pipe2flush(pipe_id);
		}
	}
	flush |= ovlp2flush(mdp4_crtc->ovlp);

	DBG("%s: flush=%08x", mdp4_crtc->name, flush);

	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}

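/* switch the fb that we logically hold a ref to (from the PoV of the
 * KMS API); the previous fb is queued for unref on the next vblank:
 */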
static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_framebuffer *old_fb = mdp4_crtc->fb;

	/* grab reference to incoming scanout fb: */
	drm_framebuffer_reference(new_fb);
	mdp4_crtc->base.fb = new_fb;
	mdp4_crtc->fb = new_fb;

	if (old_fb)
		drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
}

/* unlike update_fb(), take a ref to the new scanout fb *before* updating
 * plane, then call this.  Needed to ensure we don't unref the buffer that
 * is actually still being scanned out.
 *
 * Note that this whole thing goes away with atomic.. since we can defer
 * calling into driver until rendering is done.
 */
static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	/* flush updates, to make sure hw is updated to new scanout fb,
	 * so that we can safely queue unref to current fb (ie. next
	 * vblank we know hw is done w/ previous scanout_fb).
	 */
	crtc_flush(crtc);

	if (mdp4_crtc->scanout_fb)
		drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
				mdp4_crtc->scanout_fb);

	mdp4_crtc->scanout_fb = fb;

	/* enable vblank to complete flip: */
	request_pending(crtc, PENDING_FLIP);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp4_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on file that requested flip, then send the
		 * event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp4_crtc->event = NULL;
			drm_send_vblank_event(dev, mdp4_crtc->id, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

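/* fence callback: gpu rendering to the new fb has completed, so it is
 * now safe to point scanout at it:
 */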
static void pageflip_cb(struct msm_fence_cb *cb)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(cb, struct mdp4_crtc, pageflip_cb);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	struct drm_framebuffer *fb = crtc->fb;

	if (!fb)
		return;

	drm_framebuffer_reference(fb);
	mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
	update_scanout(crtc, fb);
}

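/* flip-work handlers, run from the workqueue after the vblank that
 * retires the old scanout buffer:
 */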
static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(work, struct mdp4_crtc, unref_fb_work);
	struct drm_device *dev = mdp4_crtc->base.dev;

	mutex_lock(&dev->mode_config.mutex);
	drm_framebuffer_unreference(val);
	mutex_unlock(&dev->mode_config.mutex);
}

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(work, struct mdp4_crtc, unref_cursor_work);
	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);

	msm_gem_put_iova(val, mdp4_kms->id);
	drm_gem_object_unreference_unlocked(val);
}

static void mdp4_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	mdp4_crtc->plane->funcs->destroy(mdp4_crtc->plane);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
	drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);

	kfree(mdp4_crtc);
}

static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	bool enabled = (mode == DRM_MODE_DPMS_ON);

	DBG("%s: mode=%d", mdp4_crtc->name, mode);

	if (enabled != mdp4_crtc->enabled) {
		if (enabled) {
			mdp4_enable(mdp4_kms);
			mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
		} else {
			mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
			mdp4_disable(mdp4_kms);
		}
		mdp4_crtc->enabled = enabled;
	}
}

static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}

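/* program the layer mixer: assign each attached plane to its mixer stage
 * and configure per-stage alpha blending based on the plane's pixel format:
 */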
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	int i, ovlp = mdp4_crtc->ovlp;
	uint32_t mixer_cfg = 0;
	static const enum mdp_mixer_stage_id stages[] = {
			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
	};
	/* statically (for now) map planes to mixer stage (z-order): */
	static const int idxs[] = {
			[VG1]  = 1,
			[VG2]  = 2,
			[RGB1] = 0,
			[RGB2] = 0,
			[RGB3] = 0,
			[VG3]  = 3,
			[VG4]  = 4,
	};
	bool alpha[4] = { false, false, false, false };

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);

	/* TODO single register for all CRTCs, so this won't work properly
	 * when multiple CRTCs are active..
	 */
	for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
		struct drm_plane *plane = mdp4_crtc->planes[i];
		if (plane) {
			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
			int idx = idxs[pipe_id];
			if (idx > 0) {
				const struct mdp_format *format =
					to_mdp_format(msm_framebuffer_format(plane->fb));
				alpha[idx-1] = format->alpha_enable;
			}
			mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]);
		}
	}

	/* this shouldn't happen.. and seems to cause underflow: */
	WARN_ON(!mixer_cfg);

	for (i = 0; i < 4; i++) {
		uint32_t op;

		if (alpha[i]) {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
		} else {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
		}

		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
	}

	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}

static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
		struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode,
		int x, int y,
		struct drm_framebuffer *old_fb)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	int ret, ovlp = mdp4_crtc->ovlp;

	mode = adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp4_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	/* grab extra ref for update_scanout() */
	drm_framebuffer_reference(crtc->fb);

	ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
			0, 0, mode->hdisplay, mode->vdisplay,
			x << 16, y << 16,
			mode->hdisplay << 16, mode->vdisplay << 16);
	if (ret) {
		drm_framebuffer_unreference(crtc->fb);
		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
				mdp4_crtc->name, ret);
		return ret;
	}

	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));

	/* take data from pipe: */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
			crtc->fb->pitches[0]);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
			MDP4_DMA_DST_SIZE_WIDTH(0) |
			MDP4_DMA_DST_SIZE_HEIGHT(0));

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
			crtc->fb->pitches[0]);

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);

	if (dma == DMA_E) {
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
	}

	update_fb(crtc, crtc->fb);
	update_scanout(crtc, crtc->fb);

	return 0;
}

static void mdp4_crtc_prepare(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s", mdp4_crtc->name);
	/* make sure we hold a ref to mdp clks while setting up mode: */
	mdp4_enable(get_kms(crtc));
	mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void mdp4_crtc_commit(struct drm_crtc *crtc)
{
	mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	crtc_flush(crtc);
	/* drop the ref to mdp clk's that we got in prepare: */
	mdp4_disable(get_kms(crtc));
}

static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
		struct drm_framebuffer *old_fb)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_plane *plane = mdp4_crtc->plane;
	struct drm_display_mode *mode = &crtc->mode;
	int ret;

	/* grab extra ref for update_scanout() */
	drm_framebuffer_reference(crtc->fb);

	ret = mdp4_plane_mode_set(plane, crtc, crtc->fb,
			0, 0, mode->hdisplay, mode->vdisplay,
			x << 16, y << 16,
			mode->hdisplay << 16, mode->vdisplay << 16);
	if (ret) {
		drm_framebuffer_unreference(crtc->fb);
		return ret;
	}

	update_fb(crtc, crtc->fb);
	update_scanout(crtc, crtc->fb);

	return 0;
}

static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
{
}

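/* queue a page flip: stash the completion event, switch the logical fb,
 * and defer the actual scanout update until gpu rendering to the new fb
 * completes (see pageflip_cb()):
 */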
static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
		struct drm_framebuffer *new_fb,
		struct drm_pending_vblank_event *event,
		uint32_t page_flip_flags)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *obj;
	unsigned long flags;

	if (mdp4_crtc->event) {
		dev_err(dev->dev, "already pending flip!\n");
		return -EBUSY;
	}

	obj = msm_framebuffer_bo(new_fb, 0);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp4_crtc->event = event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	update_fb(crtc, new_fb);

	return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
}

static int mdp4_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	// XXX
	return -EINVAL;
}

#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64

/* called from IRQ to update cursor related registers (if needed).  The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint32_t iova = mdp4_crtc->cursor.next_iova;

		if (next_bo) {
			/* take an obj ref + iova ref when we start scanning out: */
			drm_gem_object_reference(next_bo);
			msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
					MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
					MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
		} else {
			/* disable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
		}

		/* and drop the iova ref + obj ref when done scanning out: */
		if (prev_bo)
			drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));

	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}

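/* stash the new cursor bo and size; the actual register programming is
 * deferred to the vblank irq (update_cursor()):
 */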
static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file_priv, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *cursor_bo, *old_bo;
	unsigned long flags;
	uint32_t iova;
	int ret;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (handle) {
		cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
		if (!cursor_bo)
			return -ENOENT;
	} else {
		cursor_bo = NULL;
	}

	if (cursor_bo) {
		ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
		if (ret)
			goto fail;
	} else {
		iova = 0;
	}

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	old_bo = mdp4_crtc->cursor.next_bo;
	mdp4_crtc->cursor.next_bo   = cursor_bo;
	mdp4_crtc->cursor.next_iova = iova;
	mdp4_crtc->cursor.width     = width;
	mdp4_crtc->cursor.height    = height;
	mdp4_crtc->cursor.stale     = true;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	if (old_bo) {
		/* drop our previous reference: */
		msm_gem_put_iova(old_bo, mdp4_kms->id);
		drm_gem_object_unreference_unlocked(old_bo);
	}

	crtc_flush(crtc);
	request_pending(crtc, PENDING_CURSOR);

	return 0;

fail:
	drm_gem_object_unreference_unlocked(cursor_bo);
	return ret;
}

static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	mdp4_crtc->cursor.x = x;
	mdp4_crtc->cursor.y = y;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	crtc_flush(crtc);
	request_pending(crtc, PENDING_CURSOR);

	return 0;
}

static const struct drm_crtc_funcs mdp4_crtc_funcs = {
	.set_config = drm_crtc_helper_set_config,
	.destroy = mdp4_crtc_destroy,
	.page_flip = mdp4_crtc_page_flip,
	.set_property = mdp4_crtc_set_property,
	.cursor_set = mdp4_crtc_cursor_set,
	.cursor_move = mdp4_crtc_cursor_move,
};

static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
	.dpms = mdp4_crtc_dpms,
	.mode_fixup = mdp4_crtc_mode_fixup,
	.mode_set = mdp4_crtc_mode_set,
	.prepare = mdp4_crtc_prepare,
	.commit = mdp4_crtc_commit,
	.mode_set_base = mdp4_crtc_mode_set_base,
	.load_lut = mdp4_crtc_load_lut,
};

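/* vblank irq handler: complete any pending page flip and/or apply a
 * deferred cursor update, then kick the unref flip-work queues:
 */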
static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);

	pending = atomic_xchg(&mdp4_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
		drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
	}

	if (pending & PENDING_CURSOR) {
		update_cursor(crtc);
		drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
	}
}

static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
	crtc_flush(crtc);
}

uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	return mdp4_crtc->vblank.irqmask;
}

void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	DBG("cancel: %p", file);
	complete_flip(crtc, file);
}

/* set dma config, ie. the format the encoder wants. */
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
}

/* set interface for routing crtc->encoder: */
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t intf_sel;

	intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);

	switch (mdp4_crtc->dma) {
	case DMA_P:
		intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
		break;
	case DMA_S:
		intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
		break;
	case DMA_E:
		intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
		break;
	}

	if (intf == INTF_DSI_VIDEO) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
		mdp4_crtc->mixer = 0;
	} else if (intf == INTF_DSI_CMD) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
		mdp4_crtc->mixer = 0;
	} else if (intf == INTF_LCDC_DTV) {
		mdp4_crtc->mixer = 1;
	}

	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);

	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}

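/* attach/detach a plane at the slot indexed by its hw pipe, and
 * re-program blending to reflect the new set of attached planes:
 */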
static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
		struct drm_plane *plane)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));

	if (mdp4_crtc->planes[pipe_id] == plane)
		return;

	mdp4_crtc->planes[pipe_id] = plane;
	blend_setup(crtc);
	if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
		crtc_flush(crtc);
}

void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
{
	set_attach(crtc, mdp4_plane_pipe(plane), plane);
}

void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
{
	set_attach(crtc, mdp4_plane_pipe(plane), NULL);
}

static const char *dma_names[] = {
		"DMA_P", "DMA_S", "DMA_E",
};

/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id, int ovlp_id,
		enum mdp4_dma dma_id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp4_crtc *mdp4_crtc;
	int ret;

	mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
	if (!mdp4_crtc) {
		ret = -ENOMEM;
		goto fail;
	}

	crtc = &mdp4_crtc->base;

	mdp4_crtc->plane = plane;
	mdp4_crtc->id = id;

	mdp4_crtc->ovlp = ovlp_id;
	mdp4_crtc->dma = dma_id;

	mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
	mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;

	mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
	mdp4_crtc->err.irq = mdp4_crtc_err_irq;

	snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
			dma_names[dma_id], ovlp_id);

	spin_lock_init(&mdp4_crtc->cursor.lock);

	ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16,
			"unref fb", unref_fb_worker);
	if (ret)
		goto fail;

	ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
			"unref cursor", unref_cursor_worker);
	if (ret)
		goto fail;

	INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);

	drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);

	mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);

	return crtc;

fail:
	if (crtc)
		mdp4_crtc_destroy(crtc);

	return ERR_PTR(ret);
}