virtgpu_fb.c

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>

#include "virtgpu_drv.h"

#define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60)
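
/*
 * Merge the rectangle (x, y, width, height) into the framebuffer's
 * pending dirty region.  If we are in atomic context, or if the caller
 * asked to store only, just record the merged rectangle for a later
 * deferred flush; otherwise transfer the region to the host and flush
 * the resource right away.
 */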
static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
				   bool store, int x, int y,
				   int width, int height)
{
	struct drm_device *dev = fb->base.dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	bool store_for_later = false;
	int bpp = fb->base.format->cpp[0];
	int x2, y2;
	unsigned long flags;
	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);

	if ((width <= 0) || (height <= 0) ||
	    (x + width > fb->base.width) ||
	    (y + height > fb->base.height)) {
		DRM_DEBUG("values out of range %dx%d+%d+%d, fb %dx%d\n",
			  width, height, x, y,
			  fb->base.width, fb->base.height);
		return -EINVAL;
	}

	/*
	 * Can be called with pretty much any context (console output
	 * path).  If we are in atomic just store the dirty rect info
	 * to send out the update later.
	 *
	 * Can't test inside spin lock.
	 */
	if (in_atomic() || store)
		store_for_later = true;

	x2 = x + width - 1;
	y2 = y + height - 1;

	spin_lock_irqsave(&fb->dirty_lock, flags);

	/* Grow the rectangle to cover any previously stored dirty region. */
	if (fb->y1 < y)
		y = fb->y1;
	if (fb->y2 > y2)
		y2 = fb->y2;
	if (fb->x1 < x)
		x = fb->x1;
	if (fb->x2 > x2)
		x2 = fb->x2;

	if (store_for_later) {
		fb->x1 = x;
		fb->x2 = x2;
		fb->y1 = y;
		fb->y2 = y2;
		spin_unlock_irqrestore(&fb->dirty_lock, flags);
		return 0;
	}

	/* Reset the stored region to "empty" before flushing. */
	fb->x1 = fb->y1 = INT_MAX;
	fb->x2 = fb->y2 = 0;

	spin_unlock_irqrestore(&fb->dirty_lock, flags);

	{
		uint32_t offset;
		uint32_t w = x2 - x + 1;
		uint32_t h = y2 - y + 1;

		offset = (y * fb->base.pitches[0]) + x * bpp;

		virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj,
						   offset,
						   cpu_to_le32(w),
						   cpu_to_le32(h),
						   cpu_to_le32(x),
						   cpu_to_le32(y),
						   NULL);
	}
	virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
				      x, y, x2 - x + 1, y2 - y + 1);
	return 0;
}
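
/*
 * Compute the bounding box of the supplied clip rectangles (or use the
 * whole framebuffer if none are given).  Dumb buffers need an explicit
 * transfer to the host before the flush; other resources only need the
 * resource flush itself.
 */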
int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
			     struct drm_clip_rect *clips,
			     unsigned int num_clips)
{
	struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	struct drm_clip_rect norect;
	struct drm_clip_rect *clips_ptr;
	int left, right, top, bottom;
	int i;
	int inc = 1;

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = vgfb->base.width;
		norect.y2 = vgfb->base.height;
	}
	left = clips->x1;
	right = clips->x2;
	top = clips->y1;
	bottom = clips->y2;

	/* skip the first clip rect */
	for (i = 1, clips_ptr = clips + inc;
	     i < num_clips; i++, clips_ptr += inc) {
		left = min_t(int, left, (int)clips_ptr->x1);
		right = max_t(int, right, (int)clips_ptr->x2);
		top = min_t(int, top, (int)clips_ptr->y1);
		bottom = max_t(int, bottom, (int)clips_ptr->y2);
	}

	if (obj->dumb)
		return virtio_gpu_dirty_update(vgfb, false, left, top,
					       right - left, bottom - top);

	virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
				      left, top, right - left, bottom - top);
	return 0;
}
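
/*
 * Deferred-work handler: flush out the dirty region accumulated by the
 * fbcon drawing hooks below.
 */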
static void virtio_gpu_fb_dirty_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct virtio_gpu_fbdev *vfbdev =
		container_of(delayed_work, struct virtio_gpu_fbdev, work);
	struct virtio_gpu_framebuffer *vgfb = &vfbdev->vgfb;

	/* The stored coordinates are inclusive, hence the +1. */
	virtio_gpu_dirty_update(&vfbdev->vgfb, false, vgfb->x1, vgfb->y1,
				vgfb->x2 - vgfb->x1 + 1,
				vgfb->y2 - vgfb->y1 + 1);
}
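
/*
 * fbcon acceleration hooks: draw through the generic sys_* helpers,
 * record the touched rectangle for a deferred flush, and (re)arm the
 * flush worker.
 */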
static void virtio_gpu_3d_fillrect(struct fb_info *info,
				   const struct fb_fillrect *rect)
{
	struct virtio_gpu_fbdev *vfbdev = info->par;

	drm_fb_helper_sys_fillrect(info, rect);
	virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
				rect->width, rect->height);
	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static void virtio_gpu_3d_copyarea(struct fb_info *info,
				   const struct fb_copyarea *area)
{
	struct virtio_gpu_fbdev *vfbdev = info->par;

	drm_fb_helper_sys_copyarea(info, area);
	virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
				area->width, area->height);
	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static void virtio_gpu_3d_imageblit(struct fb_info *info,
				    const struct fb_image *image)
{
	struct virtio_gpu_fbdev *vfbdev = info->par;

	drm_fb_helper_sys_imageblit(info, image);
	virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
				image->width, image->height);
	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static struct fb_ops virtio_gpufb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_fillrect = virtio_gpu_3d_fillrect,
	.fb_copyarea = virtio_gpu_3d_copyarea,
	.fb_imageblit = virtio_gpu_3d_imageblit,
};
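
/*
 * .fb_probe callback: allocate and map a host XRGB8888 resource, wrap
 * it in a DRM framebuffer and register it as the fbdev console.
 */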
static int virtio_gpufb_create(struct drm_fb_helper *helper,
			       struct drm_fb_helper_surface_size *sizes)
{
	struct virtio_gpu_fbdev *vfbdev =
		container_of(helper, struct virtio_gpu_fbdev, helper);
	struct drm_device *dev = helper->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct virtio_gpu_object *obj;
	uint32_t resid, format, size;
	int ret;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * 4;
	mode_cmd.pixel_format = DRM_FORMAT_HOST_XRGB8888;

	format = virtio_gpu_translate_format(mode_cmd.pixel_format);
	if (format == 0)
		return -EINVAL;

	size = mode_cmd.pitches[0] * mode_cmd.height;
	obj = virtio_gpu_alloc_object(dev, size, false, true);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	virtio_gpu_resource_id_get(vgdev, &resid);
	virtio_gpu_cmd_create_resource(vgdev, resid, format,
				       mode_cmd.width, mode_cmd.height);

	ret = virtio_gpu_object_kmap(obj);
	if (ret) {
		DRM_ERROR("failed to kmap fb %d\n", ret);
		goto err_obj_vmap;
	}

	/* attach the object to the resource */
	ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
	if (ret)
		goto err_obj_attach;

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_fb_alloc;
	}

	info->par = helper;

	ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
					  &mode_cmd, &obj->gem_base);
	if (ret)
		goto err_fb_alloc;

	fb = &vfbdev->vgfb.base;

	vfbdev->helper.fb = fb;

	strcpy(info->fix.id, "virtiodrmfb");
	info->fbops = &virtio_gpufb_ops;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;

	info->screen_buffer = obj->vmap;
	info->screen_size = obj->gem_base.size;
	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
	drm_fb_helper_fill_var(info, &vfbdev->helper,
			       sizes->fb_width, sizes->fb_height);

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;
	return 0;

err_fb_alloc:
	virtio_gpu_object_detach(vgdev, obj);
err_obj_attach:
err_obj_vmap:
	virtio_gpu_gem_free_object(&obj->gem_base);
	return ret;
}
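
/*
 * Tear down the fbdev console: unregister the fb_info, detach the GEM
 * object from the framebuffer, then clean up the helper and the
 * framebuffer itself.
 */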
static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
				    struct virtio_gpu_fbdev *vgfbdev)
{
	struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;

	drm_fb_helper_unregister_fbi(&vgfbdev->helper);

	if (vgfb->base.obj[0])
		vgfb->base.obj[0] = NULL;
	drm_fb_helper_fini(&vgfbdev->helper);
	drm_framebuffer_cleanup(&vgfb->base);

	return 0;
}

static const struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
	.fb_probe = virtio_gpufb_create,
};
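
/*
 * Set up the fbdev emulation for the device: allocate the state,
 * initialize the deferred flush worker and let the DRM fb helpers pick
 * an initial console configuration.
 */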
int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_fbdev *vgfbdev;
	int bpp_sel = 32; /* TODO: parameter from somewhere? */
	int ret;

	vgfbdev = kzalloc(sizeof(struct virtio_gpu_fbdev), GFP_KERNEL);
	if (!vgfbdev)
		return -ENOMEM;

	vgfbdev->vgdev = vgdev;
	vgdev->vgfbdev = vgfbdev;
	INIT_DELAYED_WORK(&vgfbdev->work, virtio_gpu_fb_dirty_work);

	drm_fb_helper_prepare(vgdev->ddev, &vgfbdev->helper,
			      &virtio_gpu_fb_helper_funcs);
	ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper,
				 VIRTIO_GPUFB_CONN_LIMIT);
	if (ret) {
		kfree(vgfbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&vgfbdev->helper);
	drm_fb_helper_initial_config(&vgfbdev->helper, bpp_sel);
	return 0;
}
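
/* Undo virtio_gpu_fbdev_init(). */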
void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev)
{
	if (!vgdev->vgfbdev)
		return;

	virtio_gpu_fbdev_destroy(vgdev->ddev, vgdev->vgfbdev);
	kfree(vgdev->vgfbdev);
	vgdev->vgfbdev = NULL;
}