/* msm_atomic.c */
  1. /*
  2. * Copyright (C) 2014 Red Hat
  3. * Author: Rob Clark <robdclark@gmail.com>
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License version 2 as published by
  7. * the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program. If not, see <http://www.gnu.org/licenses/>.
  16. */
  17. #include "msm_drv.h"
  18. #include "msm_kms.h"
  19. #include "msm_gem.h"
/* Tracks one in-flight atomic commit: the state being applied, the
 * fence that must signal before the hw is touched, and which crtcs
 * this commit has claimed.
 */
struct msm_commit {
	struct drm_atomic_state *state;
	uint32_t fence;                /* highest gem fence among the new fb's (see add_fb()) */
	struct msm_fence_cb fence_cb;  /* async path: runs complete_commit() when fence signals */
	uint32_t crtc_mask;            /* bitmask of drm_crtc_index() bits touched by this commit */
};

static void fence_cb(struct msm_fence_cb *cb);
/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 */
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	int ret;

	/* The wait and the mask update must happen under the same lock so
	 * that two concurrent committers cannot both observe the crtcs as
	 * idle and claim them at the same time.
	 */
	spin_lock(&priv->pending_crtcs_event.lock);
	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
			!(priv->pending_crtcs & crtc_mask));
	if (ret == 0) {
		DBG("start: %08x", crtc_mask);
		priv->pending_crtcs |= crtc_mask;
	}
	spin_unlock(&priv->pending_crtcs_event.lock);

	/* 0 on success; non-zero (interrupted by signal) means the crtcs
	 * were NOT claimed and the caller must bail out.
	 */
	return ret;
}
/* clear specified crtcs (no longer pending update)
 */
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	spin_lock(&priv->pending_crtcs_event.lock);
	DBG("end: %08x", crtc_mask);
	priv->pending_crtcs &= ~crtc_mask;
	/* wake any committer blocked in start_atomic() on these crtcs */
	wake_up_all_locked(&priv->pending_crtcs_event);
	spin_unlock(&priv->pending_crtcs_event.lock);
}
  53. static struct msm_commit *new_commit(struct drm_atomic_state *state)
  54. {
  55. struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
  56. if (!c)
  57. return NULL;
  58. c->state = state;
  59. /* TODO we might need a way to indicate to run the cb on a
  60. * different wq so wait_for_vblanks() doesn't block retiring
  61. * bo's..
  62. */
  63. INIT_FENCE_CB(&c->fence_cb, fence_cb);
  64. return c;
  65. }
/* The (potentially) asynchronous part of the commit. At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;

	/* Push the already-swapped state to the hw: crtc/encoder setup,
	 * then plane updates, then post-plane crtc flushes.
	 */
	drm_atomic_helper_commit_pre_planes(dev, state);

	drm_atomic_helper_commit_planes(dev, state);

	drm_atomic_helper_commit_post_planes(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs. So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fb's
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (ie. we could have missed
	 * the irq. We need some way to poll for pipe shut
	 * down. Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be critical path)
	 */
	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_state_free(state);

	/* release the crtcs claimed in start_atomic(), then we own
	 * nothing more and can free ourselves
	 */
	end_atomic(dev->dev_private, c->crtc_mask);

	kfree(c);
}
  94. static void fence_cb(struct msm_fence_cb *cb)
  95. {
  96. struct msm_commit *c =
  97. container_of(cb, struct msm_commit, fence_cb);
  98. complete_commit(c);
  99. }
  100. static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
  101. {
  102. struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
  103. c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
  104. }
  105. /**
  106. * drm_atomic_helper_commit - commit validated state object
  107. * @dev: DRM device
  108. * @state: the driver state object
  109. * @async: asynchronous commit
  110. *
  111. * This function commits a with drm_atomic_helper_check() pre-validated state
  112. * object. This can still fail when e.g. the framebuffer reservation fails. For
  113. * now this doesn't implement asynchronous commits.
  114. *
  115. * RETURNS
  116. * Zero for success or -errno.
  117. */
  118. int msm_atomic_commit(struct drm_device *dev,
  119. struct drm_atomic_state *state, bool async)
  120. {
  121. int nplanes = dev->mode_config.num_total_plane;
  122. int ncrtcs = dev->mode_config.num_crtc;
  123. struct msm_commit *c;
  124. int i, ret;
  125. ret = drm_atomic_helper_prepare_planes(dev, state);
  126. if (ret)
  127. return ret;
  128. c = new_commit(state);
  129. if (!c)
  130. return -ENOMEM;
  131. /*
  132. * Figure out what crtcs we have:
  133. */
  134. for (i = 0; i < ncrtcs; i++) {
  135. struct drm_crtc *crtc = state->crtcs[i];
  136. if (!crtc)
  137. continue;
  138. c->crtc_mask |= (1 << drm_crtc_index(crtc));
  139. }
  140. /*
  141. * Figure out what fence to wait for:
  142. */
  143. for (i = 0; i < nplanes; i++) {
  144. struct drm_plane *plane = state->planes[i];
  145. struct drm_plane_state *new_state = state->plane_states[i];
  146. if (!plane)
  147. continue;
  148. if ((plane->state->fb != new_state->fb) && new_state->fb)
  149. add_fb(c, new_state->fb);
  150. }
  151. /*
  152. * Wait for pending updates on any of the same crtc's and then
  153. * mark our set of crtc's as busy:
  154. */
  155. ret = start_atomic(dev->dev_private, c->crtc_mask);
  156. if (ret)
  157. return ret;
  158. /*
  159. * This is the point of no return - everything below never fails except
  160. * when the hw goes bonghits. Which means we can commit the new state on
  161. * the software side now.
  162. */
  163. drm_atomic_helper_swap_state(dev, state);
  164. /*
  165. * Everything below can be run asynchronously without the need to grab
  166. * any modeset locks at all under one conditions: It must be guaranteed
  167. * that the asynchronous work has either been cancelled (if the driver
  168. * supports it, which at least requires that the framebuffers get
  169. * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
  170. * before the new state gets committed on the software side with
  171. * drm_atomic_helper_swap_state().
  172. *
  173. * This scheme allows new atomic state updates to be prepared and
  174. * checked in parallel to the asynchronous completion of the previous
  175. * update. Which is important since compositors need to figure out the
  176. * composition of the next frame right after having submitted the
  177. * current layout.
  178. */
  179. if (async) {
  180. msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
  181. return 0;
  182. }
  183. ret = msm_wait_fence_interruptable(dev, c->fence, NULL);
  184. if (ret) {
  185. WARN_ON(ret); // TODO unswap state back? or??
  186. kfree(c);
  187. return ret;
  188. }
  189. complete_commit(c);
  190. return 0;
  191. }