/* drivers/gpu/drm/msm/msm_atomic.c */
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
  17. #include "msm_drv.h"
  18. #include "msm_kms.h"
  19. #include "msm_gem.h"
  20. struct msm_commit {
  21. struct drm_device *dev;
  22. struct drm_atomic_state *state;
  23. uint32_t fence;
  24. struct msm_fence_cb fence_cb;
  25. uint32_t crtc_mask;
  26. };
  27. static void fence_cb(struct msm_fence_cb *cb);
  28. /* block until specified crtcs are no longer pending update, and
  29. * atomically mark them as pending update
  30. */
  31. static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
  32. {
  33. int ret;
  34. spin_lock(&priv->pending_crtcs_event.lock);
  35. ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
  36. !(priv->pending_crtcs & crtc_mask));
  37. if (ret == 0) {
  38. DBG("start: %08x", crtc_mask);
  39. priv->pending_crtcs |= crtc_mask;
  40. }
  41. spin_unlock(&priv->pending_crtcs_event.lock);
  42. return ret;
  43. }
  44. /* clear specified crtcs (no longer pending update)
  45. */
  46. static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
  47. {
  48. spin_lock(&priv->pending_crtcs_event.lock);
  49. DBG("end: %08x", crtc_mask);
  50. priv->pending_crtcs &= ~crtc_mask;
  51. wake_up_all_locked(&priv->pending_crtcs_event);
  52. spin_unlock(&priv->pending_crtcs_event.lock);
  53. }
  54. static struct msm_commit *commit_init(struct drm_atomic_state *state)
  55. {
  56. struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
  57. if (!c)
  58. return NULL;
  59. c->dev = state->dev;
  60. c->state = state;
  61. /* TODO we might need a way to indicate to run the cb on a
  62. * different wq so wait_for_vblanks() doesn't block retiring
  63. * bo's..
  64. */
  65. INIT_FENCE_CB(&c->fence_cb, fence_cb);
  66. return c;
  67. }
  68. static void commit_destroy(struct msm_commit *c)
  69. {
  70. end_atomic(c->dev->dev_private, c->crtc_mask);
  71. kfree(c);
  72. }
  73. static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
  74. struct drm_atomic_state *old_state)
  75. {
  76. struct drm_crtc *crtc;
  77. struct msm_drm_private *priv = old_state->dev->dev_private;
  78. struct msm_kms *kms = priv->kms;
  79. int ncrtcs = old_state->dev->mode_config.num_crtc;
  80. int i;
  81. for (i = 0; i < ncrtcs; i++) {
  82. crtc = old_state->crtcs[i];
  83. if (!crtc)
  84. continue;
  85. if (!crtc->state->enable)
  86. continue;
  87. /* Legacy cursor ioctls are completely unsynced, and userspace
  88. * relies on that (by doing tons of cursor updates). */
  89. if (old_state->legacy_cursor_update)
  90. continue;
  91. kms->funcs->wait_for_crtc_commit_done(kms, crtc);
  92. }
  93. }
  94. /* The (potentially) asynchronous part of the commit. At this point
  95. * nothing can fail short of armageddon.
  96. */
  97. static void complete_commit(struct msm_commit *c)
  98. {
  99. struct drm_atomic_state *state = c->state;
  100. struct drm_device *dev = state->dev;
  101. struct msm_drm_private *priv = dev->dev_private;
  102. struct msm_kms *kms = priv->kms;
  103. kms->funcs->prepare_commit(kms, state);
  104. drm_atomic_helper_commit_modeset_disables(dev, state);
  105. drm_atomic_helper_commit_planes(dev, state);
  106. drm_atomic_helper_commit_modeset_enables(dev, state);
  107. /* NOTE: _wait_for_vblanks() only waits for vblank on
  108. * enabled CRTCs. So we end up faulting when disabling
  109. * due to (potentially) unref'ing the outgoing fb's
  110. * before the vblank when the disable has latched.
  111. *
  112. * But if it did wait on disabled (or newly disabled)
  113. * CRTCs, that would be racy (ie. we could have missed
  114. * the irq. We need some way to poll for pipe shut
  115. * down. Or just live with occasionally hitting the
  116. * timeout in the CRTC disable path (which really should
  117. * not be critical path)
  118. */
  119. msm_atomic_wait_for_commit_done(dev, state);
  120. drm_atomic_helper_cleanup_planes(dev, state);
  121. kms->funcs->complete_commit(kms, state);
  122. drm_atomic_state_free(state);
  123. commit_destroy(c);
  124. }
  125. static void fence_cb(struct msm_fence_cb *cb)
  126. {
  127. struct msm_commit *c =
  128. container_of(cb, struct msm_commit, fence_cb);
  129. complete_commit(c);
  130. }
  131. static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
  132. {
  133. struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
  134. c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
  135. }
  136. int msm_atomic_check(struct drm_device *dev,
  137. struct drm_atomic_state *state)
  138. {
  139. int ret;
  140. /*
  141. * msm ->atomic_check can update ->mode_changed for pixel format
  142. * changes, hence must be run before we check the modeset changes.
  143. */
  144. ret = drm_atomic_helper_check_planes(dev, state);
  145. if (ret)
  146. return ret;
  147. ret = drm_atomic_helper_check_modeset(dev, state);
  148. if (ret)
  149. return ret;
  150. return ret;
  151. }
  152. /**
  153. * drm_atomic_helper_commit - commit validated state object
  154. * @dev: DRM device
  155. * @state: the driver state object
  156. * @async: asynchronous commit
  157. *
  158. * This function commits a with drm_atomic_helper_check() pre-validated state
  159. * object. This can still fail when e.g. the framebuffer reservation fails. For
  160. * now this doesn't implement asynchronous commits.
  161. *
  162. * RETURNS
  163. * Zero for success or -errno.
  164. */
  165. int msm_atomic_commit(struct drm_device *dev,
  166. struct drm_atomic_state *state, bool async)
  167. {
  168. int nplanes = dev->mode_config.num_total_plane;
  169. int ncrtcs = dev->mode_config.num_crtc;
  170. ktime_t timeout;
  171. struct msm_commit *c;
  172. int i, ret;
  173. ret = drm_atomic_helper_prepare_planes(dev, state);
  174. if (ret)
  175. return ret;
  176. c = commit_init(state);
  177. if (!c) {
  178. ret = -ENOMEM;
  179. goto error;
  180. }
  181. /*
  182. * Figure out what crtcs we have:
  183. */
  184. for (i = 0; i < ncrtcs; i++) {
  185. struct drm_crtc *crtc = state->crtcs[i];
  186. if (!crtc)
  187. continue;
  188. c->crtc_mask |= (1 << drm_crtc_index(crtc));
  189. }
  190. /*
  191. * Figure out what fence to wait for:
  192. */
  193. for (i = 0; i < nplanes; i++) {
  194. struct drm_plane *plane = state->planes[i];
  195. struct drm_plane_state *new_state = state->plane_states[i];
  196. if (!plane)
  197. continue;
  198. if ((plane->state->fb != new_state->fb) && new_state->fb)
  199. add_fb(c, new_state->fb);
  200. }
  201. /*
  202. * Wait for pending updates on any of the same crtc's and then
  203. * mark our set of crtc's as busy:
  204. */
  205. ret = start_atomic(dev->dev_private, c->crtc_mask);
  206. if (ret) {
  207. kfree(c);
  208. goto error;
  209. }
  210. /*
  211. * This is the point of no return - everything below never fails except
  212. * when the hw goes bonghits. Which means we can commit the new state on
  213. * the software side now.
  214. */
  215. drm_atomic_helper_swap_state(dev, state);
  216. /*
  217. * Everything below can be run asynchronously without the need to grab
  218. * any modeset locks at all under one conditions: It must be guaranteed
  219. * that the asynchronous work has either been cancelled (if the driver
  220. * supports it, which at least requires that the framebuffers get
  221. * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
  222. * before the new state gets committed on the software side with
  223. * drm_atomic_helper_swap_state().
  224. *
  225. * This scheme allows new atomic state updates to be prepared and
  226. * checked in parallel to the asynchronous completion of the previous
  227. * update. Which is important since compositors need to figure out the
  228. * composition of the next frame right after having submitted the
  229. * current layout.
  230. */
  231. if (async) {
  232. msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
  233. return 0;
  234. }
  235. timeout = ktime_add_ms(ktime_get(), 1000);
  236. /* uninterruptible wait */
  237. msm_wait_fence(dev, c->fence, &timeout, false);
  238. complete_commit(c);
  239. return 0;
  240. error:
  241. drm_atomic_helper_cleanup_planes(dev, state);
  242. return ret;
  243. }