  1. /*
  2. * Copyright (C) 2014 Red Hat
  3. * Author: Rob Clark <robdclark@gmail.com>
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License version 2 as published by
  7. * the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program. If not, see <http://www.gnu.org/licenses/>.
  16. */
  17. #include "msm_drv.h"
  18. #include "msm_kms.h"
  19. #include "msm_gem.h"
/* Book-keeping for one in-flight atomic commit. */
struct msm_commit {
	struct drm_device *dev;
	struct drm_atomic_state *state;		/* the state being committed */
	uint32_t fence;		/* max fence across all new fb bo's (see add_fb()) */
	struct msm_fence_cb fence_cb;		/* async path: fires when 'fence' passes */
	uint32_t crtc_mask;	/* bitmask of crtcs touched by this commit */
};

static void fence_cb(struct msm_fence_cb *cb);
/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 */
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	int ret;

	/* the wait and the subsequent mask update must be one atomic step,
	 * so both happen under the waitqueue's internal lock
	 */
	spin_lock(&priv->pending_crtcs_event.lock);

	/* sleep until none of the requested crtcs have an update pending;
	 * returns non-zero if interrupted by a signal
	 */
	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
			!(priv->pending_crtcs & crtc_mask));
	if (ret == 0) {
		DBG("start: %08x", crtc_mask);
		/* claim these crtcs: later commits touching any of them will
		 * block here until end_atomic() clears the bits
		 */
		priv->pending_crtcs |= crtc_mask;
	}

	spin_unlock(&priv->pending_crtcs_event.lock);

	return ret;
}
/* clear specified crtcs (no longer pending update)
 */
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	spin_lock(&priv->pending_crtcs_event.lock);
	DBG("end: %08x", crtc_mask);
	/* release the crtcs claimed in start_atomic() ... */
	priv->pending_crtcs &= ~crtc_mask;
	/* ... and wake anyone waiting on them (lock is already held) */
	wake_up_all_locked(&priv->pending_crtcs_event);
	spin_unlock(&priv->pending_crtcs_event.lock);
}
  54. static struct msm_commit *commit_init(struct drm_atomic_state *state)
  55. {
  56. struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
  57. if (!c)
  58. return NULL;
  59. c->dev = state->dev;
  60. c->state = state;
  61. /* TODO we might need a way to indicate to run the cb on a
  62. * different wq so wait_for_vblanks() doesn't block retiring
  63. * bo's..
  64. */
  65. INIT_FENCE_CB(&c->fence_cb, fence_cb);
  66. return c;
  67. }
  68. static void commit_destroy(struct msm_commit *c)
  69. {
  70. end_atomic(c->dev->dev_private, c->crtc_mask);
  71. kfree(c);
  72. }
/* The (potentially) asynchronous part of the commit.  At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->prepare_commit(kms, state);

	/* standard atomic-helper sequence: disable outgoing state, flush
	 * plane updates, then enable incoming state
	 */
	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs.  So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fb's
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (ie. we could have missed
	 * the irq.  We need some way to poll for pipe shut
	 * down.  Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be critical path)
	 */
	drm_atomic_helper_wait_for_vblanks(dev, state);

	/* balances drm_atomic_helper_prepare_planes() in msm_atomic_commit() */
	drm_atomic_helper_cleanup_planes(dev, state);

	kms->funcs->complete_commit(kms, state);

	drm_atomic_state_free(state);

	/* releases our crtc claims (end_atomic()) and frees c */
	commit_destroy(c);
}
  104. static void fence_cb(struct msm_fence_cb *cb)
  105. {
  106. struct msm_commit *c =
  107. container_of(cb, struct msm_commit, fence_cb);
  108. complete_commit(c);
  109. }
  110. static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
  111. {
  112. struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
  113. c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
  114. }
  115. int msm_atomic_check(struct drm_device *dev,
  116. struct drm_atomic_state *state)
  117. {
  118. int ret;
  119. /*
  120. * msm ->atomic_check can update ->mode_changed for pixel format
  121. * changes, hence must be run before we check the modeset changes.
  122. */
  123. ret = drm_atomic_helper_check_planes(dev, state);
  124. if (ret)
  125. return ret;
  126. ret = drm_atomic_helper_check_modeset(dev, state);
  127. if (ret)
  128. return ret;
  129. return ret;
  130. }
  131. /**
  132. * drm_atomic_helper_commit - commit validated state object
  133. * @dev: DRM device
  134. * @state: the driver state object
  135. * @async: asynchronous commit
  136. *
  137. * This function commits a with drm_atomic_helper_check() pre-validated state
  138. * object. This can still fail when e.g. the framebuffer reservation fails. For
  139. * now this doesn't implement asynchronous commits.
  140. *
  141. * RETURNS
  142. * Zero for success or -errno.
  143. */
  144. int msm_atomic_commit(struct drm_device *dev,
  145. struct drm_atomic_state *state, bool async)
  146. {
  147. int nplanes = dev->mode_config.num_total_plane;
  148. int ncrtcs = dev->mode_config.num_crtc;
  149. struct timespec timeout;
  150. struct msm_commit *c;
  151. int i, ret;
  152. ret = drm_atomic_helper_prepare_planes(dev, state);
  153. if (ret)
  154. return ret;
  155. c = commit_init(state);
  156. if (!c)
  157. return -ENOMEM;
  158. /*
  159. * Figure out what crtcs we have:
  160. */
  161. for (i = 0; i < ncrtcs; i++) {
  162. struct drm_crtc *crtc = state->crtcs[i];
  163. if (!crtc)
  164. continue;
  165. c->crtc_mask |= (1 << drm_crtc_index(crtc));
  166. }
  167. /*
  168. * Figure out what fence to wait for:
  169. */
  170. for (i = 0; i < nplanes; i++) {
  171. struct drm_plane *plane = state->planes[i];
  172. struct drm_plane_state *new_state = state->plane_states[i];
  173. if (!plane)
  174. continue;
  175. if ((plane->state->fb != new_state->fb) && new_state->fb)
  176. add_fb(c, new_state->fb);
  177. }
  178. /*
  179. * Wait for pending updates on any of the same crtc's and then
  180. * mark our set of crtc's as busy:
  181. */
  182. ret = start_atomic(dev->dev_private, c->crtc_mask);
  183. if (ret) {
  184. kfree(c);
  185. return ret;
  186. }
  187. /*
  188. * This is the point of no return - everything below never fails except
  189. * when the hw goes bonghits. Which means we can commit the new state on
  190. * the software side now.
  191. */
  192. drm_atomic_helper_swap_state(dev, state);
  193. /*
  194. * Everything below can be run asynchronously without the need to grab
  195. * any modeset locks at all under one conditions: It must be guaranteed
  196. * that the asynchronous work has either been cancelled (if the driver
  197. * supports it, which at least requires that the framebuffers get
  198. * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
  199. * before the new state gets committed on the software side with
  200. * drm_atomic_helper_swap_state().
  201. *
  202. * This scheme allows new atomic state updates to be prepared and
  203. * checked in parallel to the asynchronous completion of the previous
  204. * update. Which is important since compositors need to figure out the
  205. * composition of the next frame right after having submitted the
  206. * current layout.
  207. */
  208. if (async) {
  209. msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
  210. return 0;
  211. }
  212. jiffies_to_timespec(jiffies + msecs_to_jiffies(1000), &timeout);
  213. ret = msm_wait_fence_interruptable(dev, c->fence, &timeout);
  214. if (ret) {
  215. WARN_ON(ret); // TODO unswap state back? or??
  216. commit_destroy(c);
  217. return ret;
  218. }
  219. complete_commit(c);
  220. return 0;
  221. }