mdp5_crtc.c

/*
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include "mdp5_kms.h"

#include <linux/sort.h>
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

#define SSPP_MAX        (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
struct mdp5_crtc {
        struct drm_crtc base;
        char name[8];
        int id;
        bool enabled;

        /* layer mixer used for this CRTC (+ its lock): */
#define GET_LM_ID(crtc_id)      ((crtc_id == 3) ? 5 : crtc_id)
        int lm;
        spinlock_t lm_lock;     /* protect REG_MDP5_LM_* registers */

        /* CTL used for this CRTC: */
        struct mdp5_ctl *ctl;

        /* if there is a pending flip, these will be non-null: */
        struct drm_pending_vblank_event *event;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
        atomic_t pending;

        struct mdp_irq vblank;
        struct mdp_irq err;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
        struct msm_drm_private *priv = crtc->dev->dev_private;
        return to_mdp5_kms(to_mdp_kms(priv->kms));
}
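
/*
 * Record which kind of update (cursor and/or flip) is pending and make sure
 * the vblank irq is registered, so the next vblank can complete it.
 */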
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        atomic_or(pending, &mdp5_crtc->pending);
        mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
#define mdp5_lm_get_flush(lm)   mdp_ctl_flush_mask_lm(lm)

static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
        mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}
/*
 * flush updates, to make sure hw is updated to new scanout fb,
 * so that we can safely queue unref to current fb (ie. next
 * vblank we know hw is done w/ previous scanout_fb).
 */
static void crtc_flush_all(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_plane *plane;
        uint32_t flush_mask = 0;

        /* we could have already released CTL in the disable path: */
        if (!mdp5_crtc->ctl)
                return;

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                flush_mask |= mdp5_plane_get_flush(plane);
        }
        flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
        flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);

        crtc_flush(crtc, flush_mask);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_pending_vblank_event *event;
        struct drm_plane *plane;
        unsigned long flags;

        spin_lock_irqsave(&dev->event_lock, flags);
        event = mdp5_crtc->event;
        if (event) {
                /* if regular vblank case (!file) or if cancel-flip from
                 * preclose on file that requested flip, then send the
                 * event:
                 */
                if (!file || (event->base.file_priv == file)) {
                        mdp5_crtc->event = NULL;
                        DBG("%s: send event: %p", mdp5_crtc->name, event);
                        drm_send_vblank_event(dev, mdp5_crtc->id, event);
                }
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                mdp5_plane_complete_flip(plane);
        }
}
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        drm_crtc_cleanup(crtc);
        kfree(mdp5_crtc);
}
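
/*
 * DPMS on: grab a reference on the MDP clocks and register the error irq.
 * DPMS off: stage all layers as unused on this mixer, unregister the error
 * irq and drop the clock reference.
 */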
static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        bool enabled = (mode == DRM_MODE_DPMS_ON);

        DBG("%s: mode=%d", mdp5_crtc->name, mode);

        if (enabled != mdp5_crtc->enabled) {
                if (enabled) {
                        mdp5_enable(mdp5_kms);
                        mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
                } else {
                        /* set STAGE_UNUSED for all layers */
                        mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
                        mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
                        mdp5_disable(mdp5_kms);
                }
                mdp5_crtc->enabled = enabled;
        }
}
static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
                const struct drm_display_mode *mode,
                struct drm_display_mode *adjusted_mode)
{
        return true;
}
/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * When border is enabled, the border color will ALWAYS be the base layer.
 * Therefore, the first plane (private RGB pipe) will start at STAGE0.
 * If disabled, the first plane starts at STAGE_BASE.
 *
 * Note:
 * Border is not enabled here because the private plane is exactly
 * the CRTC resolution.
 */
static void blend_setup(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        const struct mdp5_cfg_hw *hw_cfg;
        uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
        unsigned long flags;
#define blender(stage)  ((stage) - STAGE_BASE)

        hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

        spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

        /* ctl could be released already when we are shutting down: */
        if (!mdp5_crtc->ctl)
                goto out;

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                enum mdp_mixer_stage_id stage =
                        to_mdp5_plane_state(plane->state)->stage;

                /*
                 * Note: This cannot happen with current implementation but
                 * we need to check this condition once z property is added
                 */
                BUG_ON(stage > hw_cfg->lm.nb_stages);

                /* LM */
                mdp5_write(mdp5_kms,
                                REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
                                MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
                                MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
                                blender(stage)), 0xff);
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
                                blender(stage)), 0x00);

                /* CTL */
                blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
                DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
                                pipe2name(mdp5_plane_pipe(plane)), stage);
        }

        DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
        mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);

out:
        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
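
/*
 * Program the layer mixer output size from the CRTC's adjusted mode. No
 * framebuffer is touched here; plane updates happen in the atomic path.
 */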
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        unsigned long flags;
        struct drm_display_mode *mode;

        if (WARN_ON(!crtc->state))
                return;

        mode = &crtc->state->adjusted_mode;

        DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
                        mdp5_crtc->name, mode->base.id, mode->name,
                        mode->vrefresh, mode->clock,
                        mode->hdisplay, mode->hsync_start,
                        mode->hsync_end, mode->htotal,
                        mode->vdisplay, mode->vsync_start,
                        mode->vsync_end, mode->vtotal,
                        mode->type, mode->flags);

        spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
                        MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
                        MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static void mdp5_crtc_prepare(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        DBG("%s", mdp5_crtc->name);
        /* make sure we hold a ref to mdp clks while setting up mode: */
        mdp5_enable(get_kms(crtc));
        mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void mdp5_crtc_commit(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        DBG("%s", mdp5_crtc->name);
        mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
        crtc_flush_all(crtc);
        /* drop the ref to mdp clk's that we got in prepare: */
        mdp5_disable(get_kms(crtc));
}

static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
{
}
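
/*
 * Helper pair used by mdp5_crtc_atomic_check() to sort the planes attached
 * to this CRTC by zpos before assigning mixer stages.
 */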
struct plane_state {
        struct drm_plane *plane;
        struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
        struct plane_state *pa = (struct plane_state *)a;
        struct plane_state *pb = (struct plane_state *)b;
        return pa->state->zpos - pb->state->zpos;
}
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
                struct drm_crtc_state *state)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        struct drm_device *dev = crtc->dev;
        struct plane_state pstates[STAGE3 + 1];
        int cnt = 0, i;

        DBG("%s: check", mdp5_crtc->name);

        /* request a free CTL, if none is already allocated for this CRTC */
        if (state->enable && !mdp5_crtc->ctl) {
                mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
                if (WARN_ON(!mdp5_crtc->ctl))
                        return -EINVAL;
        }

        /* verify that there are not too many planes attached to crtc
         * and that we don't have conflicting mixer stages:
         */
        drm_atomic_crtc_state_for_each_plane(plane, state) {
                struct drm_plane_state *pstate;

                if (cnt >= ARRAY_SIZE(pstates)) {
                        dev_err(dev->dev, "too many planes!\n");
                        return -EINVAL;
                }

                pstate = state->state->plane_states[drm_plane_index(plane)];

                /* plane might not have changed, in which case take
                 * current state:
                 */
                if (!pstate)
                        pstate = plane->state;

                pstates[cnt].plane = plane;
                pstates[cnt].state = to_mdp5_plane_state(pstate);

                cnt++;
        }

        sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

        for (i = 0; i < cnt; i++) {
                pstates[i].state->stage = STAGE_BASE + i;
                DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
                                pipe2name(mdp5_plane_pipe(pstates[i].plane)),
                                pstates[i].state->stage);
        }

        return 0;
}
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        DBG("%s: begin", mdp5_crtc->name);
}
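
/*
 * Stash the pending vblank event, program blending for all planes, flush
 * the CTL, and request a PENDING_FLIP completion on the next vblank. If the
 * CRTC is being disabled, the CTL is released here as well.
 */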
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        unsigned long flags;

        DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);

        WARN_ON(mdp5_crtc->event);

        spin_lock_irqsave(&dev->event_lock, flags);
        mdp5_crtc->event = crtc->state->event;
        spin_unlock_irqrestore(&dev->event_lock, flags);

        blend_setup(crtc);
        crtc_flush_all(crtc);
        request_pending(crtc, PENDING_FLIP);

        if (mdp5_crtc->ctl && !crtc->state->enable) {
                mdp5_ctl_release(mdp5_crtc->ctl);
                mdp5_crtc->ctl = NULL;
        }
}
static int mdp5_crtc_set_property(struct drm_crtc *crtc,
                struct drm_property *property, uint64_t val)
{
        // XXX
        return -EINVAL;
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .destroy = mdp5_crtc_destroy,
        .page_flip = drm_atomic_helper_page_flip,
        .set_property = mdp5_crtc_set_property,
        .reset = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
        .dpms = mdp5_crtc_dpms,
        .mode_fixup = mdp5_crtc_mode_fixup,
        .mode_set_nofb = mdp5_crtc_mode_set_nofb,
        .mode_set = drm_helper_crtc_mode_set,
        .mode_set_base = drm_helper_crtc_mode_set_base,
        .prepare = mdp5_crtc_prepare,
        .commit = mdp5_crtc_commit,
        .load_lut = mdp5_crtc_load_lut,
        .atomic_check = mdp5_crtc_atomic_check,
        .atomic_begin = mdp5_crtc_atomic_begin,
        .atomic_flush = mdp5_crtc_atomic_flush,
};
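
/*
 * Called on the CRTC's vblank: the irq unregisters itself and completes any
 * pending flip recorded by request_pending().
 */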
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
        struct drm_crtc *crtc = &mdp5_crtc->base;
        unsigned pending;

        mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

        pending = atomic_xchg(&mdp5_crtc->pending, 0);

        if (pending & PENDING_FLIP) {
                complete_flip(crtc, NULL);
        }
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
        DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
}
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
        DBG("cancel: %p", file);
        complete_flip(crtc, file);
}
/* set interface for routing crtc->encoder: */
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
                enum mdp5_intf intf_id)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        uint32_t flush_mask = 0;
        uint32_t intf_sel;
        unsigned long flags;

        /* now that we know what irq's we want: */
        mdp5_crtc->err.irqmask = intf2err(intf);
        mdp5_crtc->vblank.irqmask = intf2vblank(intf);
        mdp_irq_update(&mdp5_kms->base);

        spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
        intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

        switch (intf) {
        case 0:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
                break;
        case 1:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
                break;
        case 2:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
                break;
        case 3:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
                break;
        default:
                BUG();
                break;
        }

        mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
        spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

        DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
        mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
        flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
        flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);

        crtc_flush(crtc, flush_mask);
}
int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        if (WARN_ON(!crtc))
                return -EINVAL;

        return mdp5_crtc->lm;
}
/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
                struct drm_plane *plane, int id)
{
        struct drm_crtc *crtc = NULL;
        struct mdp5_crtc *mdp5_crtc;

        mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
        if (!mdp5_crtc)
                return ERR_PTR(-ENOMEM);

        crtc = &mdp5_crtc->base;

        mdp5_crtc->id = id;
        mdp5_crtc->lm = GET_LM_ID(id);

        spin_lock_init(&mdp5_crtc->lm_lock);

        mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
        mdp5_crtc->err.irq = mdp5_crtc_err_irq;

        snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
                        pipe2name(mdp5_plane_pipe(plane)), id);

        drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
        drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
        plane->crtc = crtc;

        mdp5_plane_install_properties(plane, &crtc->base);

        return crtc;
}