xen_drm_front_kms.c

// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include "xen_drm_front_kms.h"

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>

#include "xen_drm_front.h"
#include "xen_drm_front_conn.h"

/*
 * Timeout in ms to wait for frame done event from the backend:
 * must be a bit more than IO time-out
 */
#define FRAME_DONE_TO_MS	(XEN_DRM_FRONT_WAIT_BACK_MS + 100)

static struct xen_drm_front_drm_pipeline *
to_xen_drm_pipeline(struct drm_simple_display_pipe *pipe)
{
	return container_of(pipe, struct xen_drm_front_drm_pipeline, pipe);
}

static void fb_destroy(struct drm_framebuffer *fb)
{
	struct xen_drm_front_drm_info *drm_info = fb->dev->dev_private;
	int idx;

	if (drm_dev_enter(fb->dev, &idx)) {
		xen_drm_front_fb_detach(drm_info->front_info,
					xen_drm_front_fb_to_cookie(fb));
		drm_dev_exit(idx);
	}
	drm_gem_fb_destroy(fb);
}

static const struct drm_framebuffer_funcs fb_funcs = {
	.destroy = fb_destroy,
};

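/*
 * Create a GEM-backed framebuffer with the custom destroy hook above and
 * attach it to the backend, using cookies derived from the GEM object and
 * framebuffer pointers so the backend can refer to them later.
 */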
static struct drm_framebuffer *
fb_create(struct drm_device *dev, struct drm_file *filp,
	  const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int ret;

	fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
	if (IS_ERR_OR_NULL(fb))
		return fb;

	gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
	if (!gem_obj) {
		DRM_ERROR("Failed to lookup GEM object\n");
		ret = -ENOENT;
		goto fail;
	}

	drm_gem_object_put_unlocked(gem_obj);

	ret = xen_drm_front_fb_attach(drm_info->front_info,
				      xen_drm_front_dbuf_to_cookie(gem_obj),
				      xen_drm_front_fb_to_cookie(fb),
				      fb->width, fb->height,
				      fb->format->format);
	if (ret < 0) {
		DRM_ERROR("Back failed to attach FB %p: %d\n", fb, ret);
		goto fail;
	}

	return fb;

fail:
	drm_gem_fb_destroy(fb);
	return ERR_PTR(ret);
}

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

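/*
 * Deliver the cached page flip event (if any) to user-space and clear it,
 * holding the event lock to avoid racing with display_update().
 */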
static void send_pending_event(struct xen_drm_front_drm_pipeline *pipeline)
{
	struct drm_crtc *crtc = &pipeline->pipe.crtc;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (pipeline->pending_event)
		drm_crtc_send_vblank_event(crtc, pipeline->pending_event);
	pipeline->pending_event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

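/*
 * Enable the display by sending the current mode and framebuffer cookie to
 * the backend; on failure mark the connector as disconnected.
 */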
static void display_enable(struct drm_simple_display_pipe *pipe,
			   struct drm_crtc_state *crtc_state,
			   struct drm_plane_state *plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret, idx;

	if (!drm_dev_enter(pipe->crtc.dev, &idx))
		return;

	ret = xen_drm_front_mode_set(pipeline, crtc->x, crtc->y,
				     fb->width, fb->height,
				     fb->format->cpp[0] * 8,
				     xen_drm_front_fb_to_cookie(fb));
	if (ret) {
		DRM_ERROR("Failed to enable display: %d\n", ret);
		pipeline->conn_connected = false;
	}

	drm_dev_exit(idx);
}

static void display_disable(struct drm_simple_display_pipe *pipe)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	int ret = 0, idx;

	if (drm_dev_enter(pipe->crtc.dev, &idx)) {
		ret = xen_drm_front_mode_set(pipeline, 0, 0, 0, 0, 0,
					     xen_drm_front_fb_to_cookie(NULL));
		drm_dev_exit(idx);
	}
	if (ret)
		DRM_ERROR("Failed to disable display: %d\n", ret);

	/* Make sure we can restart with enabled connector next time */
	pipeline->conn_connected = true;

	/* release stalled event if any */
	send_pending_event(pipeline);
}

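/*
 * Called when the backend signals that a previously requested page flip
 * has completed: cancel the frame done time-out worker and deliver the
 * pending event to user-space.
 */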
void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
				     u64 fb_cookie)
{
	/*
	 * This runs in interrupt context, e.g. under
	 * drm_info->front_info->io_lock, so we cannot call _sync version
	 * to cancel the work
	 */
	cancel_delayed_work(&pipeline->pflip_to_worker);

	send_pending_event(pipeline);
}

static void pflip_to_worker(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct xen_drm_front_drm_pipeline *pipeline =
			container_of(delayed_work,
				     struct xen_drm_front_drm_pipeline,
				     pflip_to_worker);

	DRM_ERROR("Frame done timed-out, releasing");
	send_pending_event(pipeline);
}

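/*
 * Send a page flip request to the backend if this atomic update is a real
 * page flip, i.e. both the old and the new plane states carry a framebuffer.
 * Returns true if the request was sent and the pending event will be
 * delivered on frame done, false if the caller must deliver it itself.
 */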
static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
				   struct drm_plane_state *old_plane_state)
{
	struct drm_plane_state *plane_state =
			drm_atomic_get_new_plane_state(old_plane_state->state,
						       &pipe->plane);

	/*
	 * If old_plane_state->fb is NULL and plane_state->fb is not,
	 * then this is an atomic commit which will enable display.
	 * If old_plane_state->fb is not NULL and plane_state->fb is,
	 * then this is an atomic commit which will disable display.
	 * Ignore these and do not send page flip as this framebuffer will be
	 * sent to the backend as a part of display_set_config call.
	 */
	if (old_plane_state->fb && plane_state->fb) {
		struct xen_drm_front_drm_pipeline *pipeline =
				to_xen_drm_pipeline(pipe);
		struct xen_drm_front_drm_info *drm_info = pipeline->drm_info;
		int ret;

		schedule_delayed_work(&pipeline->pflip_to_worker,
				      msecs_to_jiffies(FRAME_DONE_TO_MS));

		ret = xen_drm_front_page_flip(drm_info->front_info,
					      pipeline->index,
					      xen_drm_front_fb_to_cookie(plane_state->fb));
		if (ret) {
			DRM_ERROR("Failed to send page flip request to backend: %d\n", ret);

			pipeline->conn_connected = false;
			/*
			 * Report the flip not handled, so pending event is
			 * sent, unblocking user-space.
			 */
			return false;
		}
		/*
		 * Signal that page flip was handled, pending event will be sent
		 * on frame done event from the backend.
		 */
		return true;
	}

	return false;
}

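/*
 * Cache the vblank event from the CRTC state under the event lock, then
 * either hand it over to the page flip path (delivered on frame done) or
 * deliver it right away if no flip is sent to the backend.
 */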
static void display_update(struct drm_simple_display_pipe *pipe,
			   struct drm_plane_state *old_plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_pending_vblank_event *event;
	int idx;

	event = crtc->state->event;
	if (event) {
		struct drm_device *dev = crtc->dev;
		unsigned long flags;

		WARN_ON(pipeline->pending_event);

		spin_lock_irqsave(&dev->event_lock, flags);
		crtc->state->event = NULL;

		pipeline->pending_event = event;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	if (!drm_dev_enter(pipe->crtc.dev, &idx)) {
		send_pending_event(pipeline);
		return;
	}

	/*
	 * Send page flip request to the backend *after* we have event cached
	 * above, so on page flip done event from the backend we can
	 * deliver it and there is no race condition between this code and
	 * event from the backend.
	 * If this is not a page flip, e.g. no flip done event from the backend
	 * is expected, then send now.
	 */
	if (!display_send_page_flip(pipe, old_plane_state))
		send_pending_event(pipeline);

	drm_dev_exit(idx);
}

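/*
 * Only the fixed resolution this pipeline was configured with is accepted;
 * any other mode is rejected.
 */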
static enum drm_mode_status
display_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			container_of(crtc, struct xen_drm_front_drm_pipeline,
				     pipe.crtc);

	if (mode->hdisplay != pipeline->width)
		return MODE_ERROR;

	if (mode->vdisplay != pipeline->height)
		return MODE_ERROR;

	return MODE_OK;
}

static const struct drm_simple_display_pipe_funcs display_funcs = {
	.mode_valid = display_mode_valid,
	.enable = display_enable,
	.disable = display_disable,
	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
	.update = display_update,
};

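/*
 * Set up a single connector/display pipe pair: initialize the page flip
 * time-out worker, the para-virtualized connector and the simple display
 * pipe with the formats supported by the connector.
 */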
static int display_pipe_init(struct xen_drm_front_drm_info *drm_info,
			     int index, struct xen_drm_front_cfg_connector *cfg,
			     struct xen_drm_front_drm_pipeline *pipeline)
{
	struct drm_device *dev = drm_info->drm_dev;
	const u32 *formats;
	int format_count;
	int ret;

	pipeline->drm_info = drm_info;
	pipeline->index = index;
	pipeline->height = cfg->height;
	pipeline->width = cfg->width;

	INIT_DELAYED_WORK(&pipeline->pflip_to_worker, pflip_to_worker);

	ret = xen_drm_front_conn_init(drm_info, &pipeline->conn);
	if (ret)
		return ret;

	formats = xen_drm_front_conn_get_formats(&format_count);

	return drm_simple_display_pipe_init(dev, &pipeline->pipe,
					    &display_funcs, formats,
					    format_count, NULL,
					    &pipeline->conn);
}

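/*
 * Initialize mode setting: configure mode limits, install the mode config
 * callbacks and create one display pipeline per configured connector.
 */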
int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info)
{
	struct drm_device *dev = drm_info->drm_dev;
	int i, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4095;
	dev->mode_config.max_height = 2047;
	dev->mode_config.funcs = &mode_config_funcs;

	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
		struct xen_drm_front_cfg_connector *cfg =
				&drm_info->front_info->cfg.connectors[i];
		struct xen_drm_front_drm_pipeline *pipeline =
				&drm_info->pipeline[i];

		ret = display_pipe_init(drm_info, i, cfg, pipeline);
		if (ret) {
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	drm_mode_config_reset(dev);
	drm_kms_helper_poll_init(dev);
	return 0;
}

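/*
 * Tear down mode setting: cancel outstanding time-out workers and release
 * any events still pending so user-space is not left waiting.
 */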
void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info)
{
	int i;

	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
		struct xen_drm_front_drm_pipeline *pipeline =
				&drm_info->pipeline[i];

		cancel_delayed_work_sync(&pipeline->pflip_to_worker);

		send_pending_event(pipeline);
	}
}