msm_atomic.c 7.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284
  1. /*
  2. * Copyright (C) 2014 Red Hat
  3. * Author: Rob Clark <robdclark@gmail.com>
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License version 2 as published by
  7. * the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program. If not, see <http://www.gnu.org/licenses/>.
  16. */
  17. #include "msm_drv.h"
  18. #include "msm_kms.h"
  19. #include "msm_gem.h"
  20. #include "msm_fence.h"
/* Tracks one in-flight atomic commit: the state being applied, the
 * device it belongs to, and the set of CRTCs it touches.
 */
struct msm_commit {
	struct drm_device *dev;
	struct drm_atomic_state *state;
	struct work_struct work;	/* queued on priv->atomic_wq for nonblocking commits */
	uint32_t crtc_mask;		/* bitmask of CRTC indices this commit updates */
};

static void commit_worker(struct work_struct *work);
/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 */
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	int ret;

	spin_lock(&priv->pending_crtcs_event.lock);

	/* wait_event_interruptible_locked() drops and re-acquires the wait
	 * queue's internal lock while sleeping, so pending_crtcs is only
	 * ever tested and modified under that lock.
	 */
	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
			!(priv->pending_crtcs & crtc_mask));
	if (ret == 0) {
		DBG("start: %08x", crtc_mask);
		/* claim these crtcs; released again by end_atomic() */
		priv->pending_crtcs |= crtc_mask;
	}

	spin_unlock(&priv->pending_crtcs_event.lock);

	/* returns 0 on success, or -ERESTARTSYS if interrupted by a signal */
	return ret;
}
/* clear specified crtcs (no longer pending update)
 */
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	spin_lock(&priv->pending_crtcs_event.lock);
	DBG("end: %08x", crtc_mask);
	priv->pending_crtcs &= ~crtc_mask;
	/* wake anyone blocked in start_atomic() waiting on these crtcs */
	wake_up_all_locked(&priv->pending_crtcs_event);
	spin_unlock(&priv->pending_crtcs_event.lock);
}
  54. static struct msm_commit *commit_init(struct drm_atomic_state *state)
  55. {
  56. struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
  57. if (!c)
  58. return NULL;
  59. c->dev = state->dev;
  60. c->state = state;
  61. INIT_WORK(&c->work, commit_worker);
  62. return c;
  63. }
  64. static void commit_destroy(struct msm_commit *c)
  65. {
  66. end_atomic(c->dev->dev_private, c->crtc_mask);
  67. kfree(c);
  68. }
  69. static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
  70. struct drm_atomic_state *old_state)
  71. {
  72. struct drm_crtc *crtc;
  73. struct msm_drm_private *priv = old_state->dev->dev_private;
  74. struct msm_kms *kms = priv->kms;
  75. int ncrtcs = old_state->dev->mode_config.num_crtc;
  76. int i;
  77. for (i = 0; i < ncrtcs; i++) {
  78. crtc = old_state->crtcs[i];
  79. if (!crtc)
  80. continue;
  81. if (!crtc->state->enable)
  82. continue;
  83. /* Legacy cursor ioctls are completely unsynced, and userspace
  84. * relies on that (by doing tons of cursor updates). */
  85. if (old_state->legacy_cursor_update)
  86. continue;
  87. kms->funcs->wait_for_crtc_commit_done(kms, crtc);
  88. }
  89. }
/* The (potentially) asynchronous part of the commit. At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c, bool async)
{
	/* NOTE(review): 'async' is currently unused in the body; presumably
	 * reserved for distinguishing worker-context vs direct calls — confirm
	 * before removing.
	 */
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	/* block until all fences collected in msm_atomic_commit() signal */
	drm_atomic_helper_wait_for_fences(dev, state);

	kms->funcs->prepare_commit(kms, state);

	/* standard helper sequence: disables, plane updates, enables */
	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, false);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs. So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fb's
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (ie. we could have missed
	 * the irq. We need some way to poll for pipe shut
	 * down. Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be critical path)
	 */
	msm_atomic_wait_for_commit_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	kms->funcs->complete_commit(kms, state);

	/* this path owns the state: free it, then release the crtcs and
	 * the commit object itself
	 */
	drm_atomic_state_free(state);

	commit_destroy(c);
}
  122. static void commit_worker(struct work_struct *work)
  123. {
  124. complete_commit(container_of(work, struct msm_commit, work), true);
  125. }
  126. int msm_atomic_check(struct drm_device *dev,
  127. struct drm_atomic_state *state)
  128. {
  129. int ret;
  130. /*
  131. * msm ->atomic_check can update ->mode_changed for pixel format
  132. * changes, hence must be run before we check the modeset changes.
  133. */
  134. ret = drm_atomic_helper_check_planes(dev, state);
  135. if (ret)
  136. return ret;
  137. ret = drm_atomic_helper_check_modeset(dev, state);
  138. if (ret)
  139. return ret;
  140. return ret;
  141. }
/**
 * msm_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits an atomic state object that has been pre-validated
 * with drm_atomic_helper_check(). This can still fail when e.g. the
 * framebuffer reservation fails.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
		struct drm_atomic_state *state, bool nonblock)
{
	struct msm_drm_private *priv = dev->dev_private;
	int nplanes = dev->mode_config.num_total_plane;
	int ncrtcs = dev->mode_config.num_crtc;
	struct msm_commit *c;
	int i, ret;

	/* pin framebuffers etc. for every plane in the update */
	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	c = commit_init(state);
	if (!c) {
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Figure out what crtcs we have:
	 */
	for (i = 0; i < ncrtcs; i++) {
		struct drm_crtc *crtc = state->crtcs[i];
		if (!crtc)
			continue;
		c->crtc_mask |= (1 << drm_crtc_index(crtc));
	}

	/*
	 * Figure out what fence to wait for:
	 */
	for (i = 0; i < nplanes; i++) {
		struct drm_plane *plane = state->planes[i];
		struct drm_plane_state *new_state = state->plane_states[i];

		if (!plane)
			continue;

		/* only wait on a fence when the fb actually changes;
		 * complete_commit() blocks on these via
		 * drm_atomic_helper_wait_for_fences()
		 */
		if ((plane->state->fb != new_state->fb) && new_state->fb) {
			struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0);
			struct msm_gem_object *msm_obj = to_msm_bo(obj);

			new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
		}
	}

	/*
	 * Wait for pending updates on any of the same crtc's and then
	 * mark our set of crtc's as busy:
	 */
	ret = start_atomic(dev->dev_private, c->crtc_mask);
	if (ret) {
		/* crtcs were never claimed, so plain kfree() rather than
		 * commit_destroy() (which would call end_atomic())
		 */
		kfree(c);
		goto error;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(dev, state);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one conditions: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	if (nonblock) {
		/* ownership of c (and state) passes to the worker */
		queue_work(priv->atomic_wq, &c->work);
		return 0;
	}

	/* blocking path: finish the commit inline; complete_commit()
	 * frees both the state and c
	 */
	complete_commit(c, false);

	return 0;

error:
	/* undo prepare_planes(); the caller retains ownership of state
	 * on failure
	 */
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}