/* virtgpu_plane.c */
  1. /*
  2. * Copyright (C) 2015 Red Hat, Inc.
  3. * All Rights Reserved.
  4. *
  5. * Permission is hereby granted, free of charge, to any person obtaining
  6. * a copy of this software and associated documentation files (the
  7. * "Software"), to deal in the Software without restriction, including
  8. * without limitation the rights to use, copy, modify, merge, publish,
  9. * distribute, sublicense, and/or sell copies of the Software, and to
  10. * permit persons to whom the Software is furnished to do so, subject to
  11. * the following conditions:
  12. *
  13. * The above copyright notice and this permission notice (including the
  14. * next paragraph) shall be included in all copies or substantial
  15. * portions of the Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  18. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  19. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  20. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
  21. * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  22. * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  23. * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24. */
  25. #include "virtgpu_drv.h"
  26. #include <drm/drm_plane_helper.h>
  27. #include <drm/drm_atomic_helper.h>
/*
 * Pixel formats advertised for the primary plane.  All are 32bpp
 * RGB-style fourccs; see virtio_gpu_translate_format() for the
 * mapping to the VIRTIO_GPU_FORMAT_* codes sent to the host.
 */
static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_BGRX8888,
	DRM_FORMAT_BGRA8888,
	DRM_FORMAT_RGBX8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};
/*
 * The cursor plane supports exactly one format.  The fourcc chosen
 * depends on host byte order (DRM fourccs describe little-endian
 * memory layout, so the in-memory component order flips with CPU
 * endianness — NOTE(review): presumably the host expects the same
 * byte layout either way; confirm against the virtio-gpu spec).
 */
static const uint32_t virtio_gpu_cursor_formats[] = {
#ifdef __BIG_ENDIAN
	DRM_FORMAT_BGRA8888,
#else
	DRM_FORMAT_ARGB8888,
#endif
};
/*
 * virtio_gpu_translate_format - map a DRM fourcc to a virtio-gpu format
 * @drm_fourcc: one of the DRM_FORMAT_* codes listed in virtio_gpu_formats[]
 *
 * DRM fourccs define byte order in memory (little-endian layout), while
 * the VIRTIO_GPU_FORMAT_* names describe component order within a
 * 32-bit word; hence the mapping differs between big- and little-endian
 * builds (the two #ifdef halves below are byte-swapped mirrors of each
 * other).
 *
 * Returns the VIRTIO_GPU_FORMAT_* code, or 0 (with a WARN) for a fourcc
 * not present in virtio_gpu_formats[] — which should never happen since
 * the plane only advertises those formats.
 */
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
#ifdef __BIG_ENDIAN
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_RGBX8888:
		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
		break;
	case DRM_FORMAT_RGBA8888:
		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
		break;
	case DRM_FORMAT_XBGR8888:
		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
		break;
	case DRM_FORMAT_ABGR8888:
		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
		break;
#else
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_RGBX8888:
		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
		break;
	case DRM_FORMAT_RGBA8888:
		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
		break;
	case DRM_FORMAT_XBGR8888:
		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
		break;
	case DRM_FORMAT_ABGR8888:
		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
		break;
#endif
	default:
		/*
		 * This should not happen, we handle everything listed
		 * in virtio_gpu_formats[].
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}
/*
 * drm_plane_funcs.destroy callback: unregister the plane from DRM
 * first, then free the allocation made in virtio_gpu_plane_init().
 */
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}
/*
 * Shared by both primary and cursor planes: everything except destroy
 * is handled by the generic atomic helpers; the per-plane-type work
 * happens in the drm_plane_helper_funcs atomic_update callbacks below.
 */
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= virtio_gpu_plane_destroy,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};
/*
 * No driver-specific constraints on plane state: accept anything the
 * DRM core has already validated.
 */
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_plane_state *state)
{
	return 0;
}
  129. static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
  130. struct drm_plane_state *old_state)
  131. {
  132. struct drm_device *dev = plane->dev;
  133. struct virtio_gpu_device *vgdev = dev->dev_private;
  134. struct virtio_gpu_output *output = NULL;
  135. struct virtio_gpu_framebuffer *vgfb;
  136. struct virtio_gpu_object *bo;
  137. uint32_t handle;
  138. if (plane->state->crtc)
  139. output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
  140. if (old_state->crtc)
  141. output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
  142. if (WARN_ON(!output))
  143. return;
  144. if (plane->state->fb) {
  145. vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
  146. bo = gem_to_virtio_gpu_obj(vgfb->obj);
  147. handle = bo->hw_res_handle;
  148. if (bo->dumb) {
  149. virtio_gpu_cmd_transfer_to_host_2d
  150. (vgdev, handle, 0,
  151. cpu_to_le32(plane->state->src_w >> 16),
  152. cpu_to_le32(plane->state->src_h >> 16),
  153. cpu_to_le32(plane->state->src_x >> 16),
  154. cpu_to_le32(plane->state->src_y >> 16), NULL);
  155. }
  156. } else {
  157. handle = 0;
  158. }
  159. DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
  160. plane->state->crtc_w, plane->state->crtc_h,
  161. plane->state->crtc_x, plane->state->crtc_y,
  162. plane->state->src_w >> 16,
  163. plane->state->src_h >> 16,
  164. plane->state->src_x >> 16,
  165. plane->state->src_y >> 16);
  166. virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
  167. plane->state->src_w >> 16,
  168. plane->state->src_h >> 16,
  169. plane->state->src_x >> 16,
  170. plane->state->src_y >> 16);
  171. virtio_gpu_cmd_resource_flush(vgdev, handle,
  172. plane->state->src_x >> 16,
  173. plane->state->src_y >> 16,
  174. plane->state->src_w >> 16,
  175. plane->state->src_h >> 16);
  176. }
  177. static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
  178. struct drm_plane_state *old_state)
  179. {
  180. struct drm_device *dev = plane->dev;
  181. struct virtio_gpu_device *vgdev = dev->dev_private;
  182. struct virtio_gpu_output *output = NULL;
  183. struct virtio_gpu_framebuffer *vgfb;
  184. struct virtio_gpu_fence *fence = NULL;
  185. struct virtio_gpu_object *bo = NULL;
  186. uint32_t handle;
  187. int ret = 0;
  188. if (plane->state->crtc)
  189. output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
  190. if (old_state->crtc)
  191. output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
  192. if (WARN_ON(!output))
  193. return;
  194. if (plane->state->fb) {
  195. vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
  196. bo = gem_to_virtio_gpu_obj(vgfb->obj);
  197. handle = bo->hw_res_handle;
  198. } else {
  199. handle = 0;
  200. }
  201. if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
  202. /* new cursor -- update & wait */
  203. virtio_gpu_cmd_transfer_to_host_2d
  204. (vgdev, handle, 0,
  205. cpu_to_le32(plane->state->crtc_w),
  206. cpu_to_le32(plane->state->crtc_h),
  207. 0, 0, &fence);
  208. ret = virtio_gpu_object_reserve(bo, false);
  209. if (!ret) {
  210. reservation_object_add_excl_fence(bo->tbo.resv,
  211. &fence->f);
  212. dma_fence_put(&fence->f);
  213. fence = NULL;
  214. virtio_gpu_object_unreserve(bo);
  215. virtio_gpu_object_wait(bo, false);
  216. }
  217. }
  218. if (plane->state->fb != old_state->fb) {
  219. DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
  220. plane->state->crtc_x,
  221. plane->state->crtc_y,
  222. plane->state->fb ? plane->state->fb->hot_x : 0,
  223. plane->state->fb ? plane->state->fb->hot_y : 0);
  224. output->cursor.hdr.type =
  225. cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
  226. output->cursor.resource_id = cpu_to_le32(handle);
  227. if (plane->state->fb) {
  228. output->cursor.hot_x =
  229. cpu_to_le32(plane->state->fb->hot_x);
  230. output->cursor.hot_y =
  231. cpu_to_le32(plane->state->fb->hot_y);
  232. } else {
  233. output->cursor.hot_x = cpu_to_le32(0);
  234. output->cursor.hot_y = cpu_to_le32(0);
  235. }
  236. } else {
  237. DRM_DEBUG("move +%d+%d\n",
  238. plane->state->crtc_x,
  239. plane->state->crtc_y);
  240. output->cursor.hdr.type =
  241. cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
  242. }
  243. output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
  244. output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
  245. virtio_gpu_cursor_ping(vgdev, output);
  246. }
/* Helper hooks for the primary plane. */
static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};
/* Helper hooks for the cursor plane. */
static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};
  255. struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
  256. enum drm_plane_type type,
  257. int index)
  258. {
  259. struct drm_device *dev = vgdev->ddev;
  260. const struct drm_plane_helper_funcs *funcs;
  261. struct drm_plane *plane;
  262. const uint32_t *formats;
  263. int ret, nformats;
  264. plane = kzalloc(sizeof(*plane), GFP_KERNEL);
  265. if (!plane)
  266. return ERR_PTR(-ENOMEM);
  267. if (type == DRM_PLANE_TYPE_CURSOR) {
  268. formats = virtio_gpu_cursor_formats;
  269. nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
  270. funcs = &virtio_gpu_cursor_helper_funcs;
  271. } else {
  272. formats = virtio_gpu_formats;
  273. nformats = ARRAY_SIZE(virtio_gpu_formats);
  274. funcs = &virtio_gpu_primary_helper_funcs;
  275. }
  276. ret = drm_universal_plane_init(dev, plane, 1 << index,
  277. &virtio_gpu_plane_funcs,
  278. formats, nformats,
  279. NULL, type, NULL);
  280. if (ret)
  281. goto err_plane_init;
  282. drm_plane_helper_add(plane, funcs);
  283. return plane;
  284. err_plane_init:
  285. kfree(plane);
  286. return ERR_PTR(ret);
  287. }