virtgpu_ioctl.c

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/virtgpu_drm.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include "virtgpu_drv.h"
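
/*
 * DRM ioctl entry points for the virtio-gpu driver.  Userspace reaches
 * these through the generic DRM ioctl path; each handler below unpacks a
 * drm_virtgpu_* struct from <drm/virtgpu_drm.h> (already copied in by the
 * DRM core) and translates it into virtio commands for the host.
 */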

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	/* virtio-gpu wire structs are little-endian */
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}
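
/*
 * Hypothetical userspace sketch (not part of this driver): the MAP ioctl
 * only returns a fake mmap offset; the actual mapping is a plain mmap()
 * on the DRM fd.  "fd", "handle" and "size" are assumed to come from an
 * earlier resource-create call.
 *
 *	struct drm_virtgpu_map map = { .handle = handle };
 *
 *	if (ioctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map))
 *		err(1, "map ioctl");
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */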

static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
					   struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}

static void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_put_unlocked(&qobj->gem_base);
	}
}
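
/*
 * The two helpers above implement the usual TTM execbuf pattern: reserve
 * and validate every BO on the list under one ww_acquire_ctx, attach the
 * submission fence with ttm_eu_fence_buffer_objects() (which also drops
 * the reservations), then release the GEM references taken at lookup
 * time via virtio_gpu_unref_list().
 */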

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *drm_file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	void *buf;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		buflist = kvmalloc_array(exbuf->num_bo_handles,
					 sizeof(struct ttm_validate_buffer),
					 GFP_KERNEL | __GFP_ZERO);
		if (!bo_handles || !buflist) {
			kvfree(bo_handles);
			kvfree(buflist);
			return -ENOMEM;
		}

		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			kvfree(bo_handles);
			kvfree(buflist);
			return ret;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
			if (!gobj) {
				kvfree(bo_handles);
				kvfree(buflist);
				return -ENOENT;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		kvfree(bo_handles);
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

	buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
			  exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unresv;
	}
	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, &fence);

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);

	/* fence the command bo */
	virtio_gpu_unref_list(&validate_list);
	kvfree(buflist);
	dma_fence_put(&fence->f);
	return 0;

out_unresv:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
	virtio_gpu_unref_list(&validate_list);
	kvfree(buflist);
	return ret;
}
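
/*
 * Hypothetical userspace sketch (not part of this driver): submit a
 * virgl command stream that references two BOs.  "cmds", "cmds_size",
 * "dst_handle" and "src_handle" are assumptions for illustration.
 *
 *	uint32_t handles[2] = { dst_handle, src_handle };
 *	struct drm_virtgpu_execbuffer eb = {
 *		.command = (uintptr_t)cmds,
 *		.size = cmds_size,
 *		.bo_handles = (uintptr_t)handles,
 *		.num_bo_handles = 2,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb))
 *		err(1, "execbuffer");
 */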

static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user((void __user *)(unsigned long)param->value,
			 &value, sizeof(int))) {
		return -EFAULT;
	}
	return 0;
}
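
/*
 * Hypothetical userspace sketch (not part of this driver): probe for
 * virgl 3D support before touching the 3D ioctls.  On success, have_3d
 * is nonzero iff the host advertises virgl.
 *
 *	int have_3d = 0;
 *	struct drm_virtgpu_getparam gp = {
 *		.param = VIRTGPU_PARAM_3D_FEATURES,
 *		.value = (uintptr_t)&have_3d,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp);
 */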

static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	int ret;
	uint32_t res_id;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	uint32_t size;
	struct list_head validate_list;
	struct ttm_validate_buffer mainbuf;
	struct virtio_gpu_fence *fence = NULL;
	struct ww_acquire_ctx ticket;
	struct virtio_gpu_resource_create_3d rc_3d;

	if (!vgdev->has_virgl_3d) {
		/* without virgl only plain 2D resources are supported */
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	INIT_LIST_HEAD(&validate_list);
	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

	virtio_gpu_resource_id_get(vgdev, &res_id);

	size = rc->size;

	/* allocate a single page size object */
	if (size == 0)
		size = PAGE_SIZE;

	qobj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(qobj)) {
		ret = PTR_ERR(qobj);
		goto fail_id;
	}
	obj = &qobj->gem_base;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
					       rc->width, rc->height);

		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
	} else {
		/* use a gem reference since unref list undoes them */
		drm_gem_object_get(&qobj->gem_base);
		mainbuf.bo = &qobj->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret) {
			DRM_DEBUG("failed to validate\n");
			goto fail_unref;
		}

		rc_3d.resource_id = cpu_to_le32(res_id);
		rc_3d.target = cpu_to_le32(rc->target);
		rc_3d.format = cpu_to_le32(rc->format);
		rc_3d.bind = cpu_to_le32(rc->bind);
		rc_3d.width = cpu_to_le32(rc->width);
		rc_3d.height = cpu_to_le32(rc->height);
		rc_3d.depth = cpu_to_le32(rc->depth);
		rc_3d.array_size = cpu_to_le32(rc->array_size);
		rc_3d.last_level = cpu_to_le32(rc->last_level);
		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
		rc_3d.flags = cpu_to_le32(rc->flags);

		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
		if (ret) {
			ttm_eu_backoff_reservation(&ticket, &validate_list);
			goto fail_unref;
		}
		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
	}

	qobj->hw_res_handle = res_id;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		if (vgdev->has_virgl_3d) {
			virtio_gpu_unref_list(&validate_list);
			dma_fence_put(&fence->f);
		}
		return ret;
	}
	drm_gem_object_put_unlocked(obj);

	rc->res_handle = res_id; /* similar to a VM address */
	rc->bo_handle = handle;

	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		dma_fence_put(&fence->f);
	}
	return 0;
fail_unref:
	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		dma_fence_put(&fence->f);
	}
//fail_obj:
//	drm_gem_object_handle_unreference_unlocked(obj);
fail_id:
	virtio_gpu_resource_id_put(vgdev, res_id);
	return ret;
}
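
/*
 * Hypothetical userspace sketch (not part of this driver): create a
 * 512x512 2D resource.  "fmt" and "bind_flags" are illustrative
 * assumptions following the virgl conventions (target 2 == 2D texture).
 *
 *	struct drm_virtgpu_resource_create rc = {
 *		.target = 2,
 *		.format = fmt,
 *		.bind = bind_flags,
 *		.width = 512,
 *		.height = 512,
 *		.depth = 1,
 *		.array_size = 1,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &rc))
 *		err(1, "resource create");
 *	// rc.bo_handle and rc.res_handle are now filled in
 */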

static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_put_unlocked(gobj);
	return 0;
}
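
/*
 * Hypothetical userspace sketch (not part of this driver): recover the
 * host resource id and the size for a BO handle.
 *
 *	struct drm_virtgpu_resource_info ri = { .bo_handle = handle };
 *
 *	if (ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &ri) == 0)
 *		printf("res %u, %u bytes\n", ri.res_handle, ri.size);
 */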

static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, &fence);
	reservation_object_add_excl_fence(qobj->tbo.resv,
					  &fence->f);

	dma_fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_put_unlocked(gobj);
	return ret;
}
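
/*
 * Note on fencing: the exclusive fence attached to the BO's reservation
 * object above makes anyone who later synchronizes on that reservation
 * (for instance the WAIT ioctl below) block until the host has finished
 * the transfer.
 */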

static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, qobj->hw_res_handle, offset,
			 box.w, box.h, box.x, box.y, NULL);
	} else {
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev, qobj->hw_res_handle,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, &fence);
		reservation_object_add_excl_fence(qobj->tbo.resv,
						  &fence->f);
		dma_fence_put(&fence->f);
	}

out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_put_unlocked(gobj);
	return ret;
}
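
/*
 * Hypothetical userspace sketch (not part of this driver): after writing
 * pixels through an mmap of the BO, push a region to the host copy of
 * the resource.  "handle" and the box size are illustrative assumptions.
 *
 *	struct drm_virtgpu_3d_transfer_to_host xfer = {
 *		.bo_handle = handle,
 *		.box = { .x = 0, .y = 0, .w = 512, .h = 512, .d = 1 },
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer))
 *		err(1, "transfer to host");
 */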

static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	int ret;
	bool nowait = false;

	gobj = drm_gem_object_lookup(file, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		nowait = true;
	ret = virtio_gpu_object_wait(qobj, nowait);

	drm_gem_object_put_unlocked(gobj);
	return ret;
}
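
/*
 * Hypothetical userspace sketch (not part of this driver): poll whether
 * the host is still using a BO.  With VIRTGPU_WAIT_NOWAIT the ioctl
 * fails with -EBUSY instead of blocking.
 *
 *	struct drm_virtgpu_3d_wait wait = {
 *		.handle = handle,
 *		.flags = VIRTGPU_WAIT_NOWAIT,
 *	};
 *
 *	int busy = ioctl(fd, DRM_IOCTL_VIRTGPU_WAIT, &wait) < 0 &&
 *		   errno == EBUSY;
 */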

static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	int size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	size = vgdev->capsets[found_valid].max_size;
	if (args->size > size) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			ptr = cache_ent->caps_cache;
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	/* don't copy a cache entry the host never filled in */
	if (!ret)
		return -EBUSY;

	ptr = cache_ent->caps_cache;

copy_exit:
	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
		return -EFAULT;

	return 0;
}
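
/*
 * Hypothetical userspace sketch (not part of this driver): fetch the
 * virgl capability set.  Note the handler copies the capset's full
 * max_size bytes, so the buffer must be at least that large; the 4096
 * here is an assumption.
 *
 *	char caps[4096];
 *	struct drm_virtgpu_get_caps gc = {
 *		.cap_set_id = 1,	// VIRTIO_GPU_CAPSET_VIRGL
 *		.cap_set_ver = 1,
 *		.addr = (uintptr_t)caps,
 *		.size = sizeof(caps),
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &gc))
 *		err(1, "get caps");
 */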

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};