/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		  \
			       + MAX_INLINE_RESP_SIZE)

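/*
 * Resource ids identify host-side resources.  They are handed out from a
 * driver-private IDR so they can be recycled once the resource is unrefed.
 */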
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				uint32_t *resid)
{
	int handle;

	idr_preload(GFP_KERNEL);
	spin_lock(&vgdev->resource_idr_lock);
	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&vgdev->resource_idr_lock);
	idr_preload_end();
	*resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	spin_lock(&vgdev->resource_idr_lock);
	idr_remove(&vgdev->resource_idr, id);
	spin_unlock(&vgdev->resource_idr_lock);
}

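/*
 * Virtqueue notification callbacks only kick the per-queue dequeue work;
 * the actual buffer reclaim happens in the work handlers below.
 */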
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

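/*
 * Allocate a vbuffer from the slab cache.  Commands up to
 * MAX_INLINE_CMD_SIZE and responses up to MAX_INLINE_RESP_SIZE live in the
 * space reserved right behind the vbuffer itself; larger responses must be
 * supplied by the caller via resp_buf.
 */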
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);
	memset(vbuf, 0, VBUFFER_SIZE);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
					virtio_gpu_resp_cb cb,
					struct virtio_gpu_vbuffer **vbuffer_p,
					int cmd_size, int resp_size,
					void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

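/* Pull all completed buffers off the virtqueue onto a local reclaim list. */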
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

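/*
 * Work handler for the control queue: collect completed vbuffers while
 * callbacks are disabled, then (outside the queue lock) check response
 * types, track the highest signalled fence id, run per-buffer response
 * callbacks and free the buffers.
 */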
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

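/*
 * Work handler for the cursor queue: cursor commands carry no response
 * payload, so completed buffers are simply freed.
 */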
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

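/*
 * Add one command to the control virtqueue.  The scatterlist holds the
 * command itself, an optional outgoing data buffer and an optional incoming
 * response buffer.  If the ring is full the queue lock is dropped and we
 * wait for the host to make room.  Called with ctrlq.qlock held.
 */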
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

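/*
 * Like virtio_gpu_queue_ctrl_buffer(), but optionally emits a fence right
 * before the buffer is queued, so fence ids reach the host in order.
 */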
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence **fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not,
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 3) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

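/* Queue a cursor command; the cursor queue carries no response buffers. */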
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}
	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    uint32_t resource_id,
				    uint32_t format,
				    uint32_t width,
				    uint32_t height)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(width);
	cmd_p->height = cpu_to_le32(height);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
					   uint32_t resource_id)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

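/*
 * Response callbacks below run from the dequeue work.  They copy the data
 * returned by the host into driver state and wake up anyone waiting on
 * resp_wq.
 */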
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *resp_buf;

	/* validate idx before indexing into the capsets array */
	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	max_size = vgdev->capsets[idx].max_size;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_resource_create_3d *rc_3d,
				  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	*cmd_p = *rc_3d;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->hdr.flags = 0;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

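/*
 * Attach backing pages to a host resource: build a virtio_gpu_mem_entry
 * array from the object's sg table and hand it to the host.  The entry
 * array is freed by free_vbuf() once the host has consumed the command.
 */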
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     uint32_t resource_id,
			     struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(obj->pages->nents,
			     sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
		ents[si].addr = cpu_to_le64(sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
					       ents, obj->pages->nents,
					       fence);
	obj->hw_res_handle = resource_id;
	return 0;
}

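/* Push the current cursor state for one output to the host cursor queue. */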
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}