virtgpu_vq.c

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)
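
/*
 * Each vbuffer is carved out of a single slab object: the struct itself,
 * followed by MAX_INLINE_CMD_SIZE bytes for the command and
 * MAX_INLINE_RESP_SIZE bytes for a small inline response, so the common
 * commands need no extra allocations.
 */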
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
                                uint32_t *resid)
{
        int handle;

        idr_preload(GFP_KERNEL);
        spin_lock(&vgdev->resource_idr_lock);
        handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
        spin_unlock(&vgdev->resource_idr_lock);
        idr_preload_end();
        *resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
        spin_lock(&vgdev->resource_idr_lock);
        idr_remove(&vgdev->resource_idr, id);
        spin_unlock(&vgdev->resource_idr_lock);
}
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
        vgdev->vbufs = NULL;
}
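
/*
 * Allocate a vbuffer from the slab cache. The command buffer always lives
 * inline right after the struct; the response buffer is inline too when it
 * fits in MAX_INLINE_RESP_SIZE, otherwise the caller must supply resp_buf
 * (which free_vbuf() will kfree later).
 */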
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
        if (!vbuf)
                return ERR_PTR(-ENOMEM);
        memset(vbuf, 0, VBUFFER_SIZE);

        BUG_ON(size > MAX_INLINE_CMD_SIZE);
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}
static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, size,
                                   sizeof(struct virtio_gpu_ctrl_hdr),
                                   NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}
static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}
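
/*
 * Pull completed vbuffers off the virtqueue and collect them on
 * reclaim_list for processing outside the queue lock. The caller must
 * hold the queue's qlock.
 */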
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}
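
/*
 * Work-queue handler for the control queue: drain completed buffers
 * (re-checking after re-enabling callbacks to close the race with the
 * interrupt), run the response callbacks, track the highest fence id
 * seen, then signal the fences and any waiters.
 */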
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
                        DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);

                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);
}
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}
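
/*
 * Queue a command on the control queue. If the ring is full this drops
 * qlock, sleeps until completions free up descriptors, then retries --
 * hence the __releases/__acquires annotations below.
 */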
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
                __releases(&vgdev->ctrlq.qlock)
                __acquires(&vgdev->ctrlq.qlock)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        int outcnt = 0, incnt = 0;
        int ret;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        sgs[outcnt + incnt] = &vcmd;
        outcnt++;

        if (vbuf->data_size) {
                sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                sgs[outcnt + incnt] = &vout;
                outcnt++;
        }

        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
                spin_lock(&vgdev->ctrlq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        if (!ret)
                ret = vq->num_free;
        return ret;
}
static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_vbuffer *vbuf)
{
        int rc;

        spin_lock(&vgdev->ctrlq.qlock);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_ctrl_hdr *hdr,
                                               struct virtio_gpu_fence **fence)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int rc;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        /*
         * Make sure we have enough space in the virtqueue. If not,
         * wait here until we have.
         *
         * Without that virtio_gpu_queue_ctrl_buffer_locked might have
         * to wait for free space, which can result in fence ids being
         * submitted out-of-order.
         */
        if (vq->num_free < 3) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
                goto again;
        }

        if (fence)
                virtio_gpu_fence_emit(vgdev, hdr, fence);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}
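
/*
 * Cursor updates travel on their own virtqueue, so they are never
 * stalled behind long-running control commands.
 */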
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int ret;
        int outcnt;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }
        spin_unlock(&vgdev->cursorq.qlock);

        if (!ret)
                ret = vq->num_free;
        return ret;
}
/* Just create gem objects for userspace and long-lived objects,
 * and use dma_alloc'ed pages for the queue objects? */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    uint32_t resource_id,
                                    uint32_t format,
                                    uint32_t width,
                                    uint32_t height)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->format = cpu_to_le32(format);
        cmd_p->width = cpu_to_le32(width);
        cmd_p->height = cpu_to_le32(height);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
                                           uint32_t resource_id)
{
        struct virtio_gpu_resource_detach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id, uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = width;
        cmd_p->r.height = height;
        cmd_p->r.x = x;
        cmd_p->r.y = y;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
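
/*
 * Response callback for GET_DISPLAY_INFO: runs from the dequeue work,
 * copies the per-scanout modes under display_info_lock, wakes waiters,
 * and reports a hotplug event so userspace re-probes the outputs.
 */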
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}
static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
        vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
        vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *resp_buf;

        /* Validate idx before indexing into vgdev->capsets; the old
         * "idx > num_capsets" test was off by one and read max_size
         * before the bounds check. */
        if (idx >= vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        max_size = vgdev->capsets[idx].max_size;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_resource_create_3d *rc_3d,
                                  struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        *cmd_p = *rc_3d;
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->hdr.flags = 0;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id, uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t resource_id, uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct virtio_gpu_box *box,
                                          struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
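
/*
 * Attach guest pages as the backing store for a host resource: build a
 * virtio_gpu_mem_entry array from the object's sg table and hand it to
 * ATTACH_BACKING. The array is owned by the vbuf from here on and is
 * kfree'd by free_vbuf() once the host has consumed the command.
 */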
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
                             uint32_t resource_id,
                             struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_mem_entry *ents;
        struct scatterlist *sg;
        int si;

        if (!obj->pages) {
                int ret;

                ret = virtio_gpu_object_get_sg_table(vgdev, obj);
                if (ret)
                        return ret;
        }

        /* gets freed when the ring has consumed it */
        ents = kmalloc_array(obj->pages->nents,
                             sizeof(struct virtio_gpu_mem_entry),
                             GFP_KERNEL);
        if (!ents) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }

        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
                ents[si].addr = cpu_to_le64(sg_phys(sg));
                ents[si].length = cpu_to_le32(sg->length);
                ents[si].padding = 0;
        }

        virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
                                               ents, obj->pages->nents,
                                               fence);
        obj->hw_res_handle = resource_id;
        return 0;
}
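
/*
 * Push the current cursor state for one output to the host via the
 * cursor queue; position, hotspot and resource all live in
 * output->cursor, which the display code keeps up to date.
 */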
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}