@@ -74,51 +74,19 @@ void virtio_gpu_cursor_ack(struct virtqueue *vq)
 
 int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
 {
-	struct virtio_gpu_vbuffer *vbuf;
-	int i, size, count = 16;
-	void *ptr;
-
-	INIT_LIST_HEAD(&vgdev->free_vbufs);
-	spin_lock_init(&vgdev->free_vbufs_lock);
-	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
-	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
-	size = count * VBUFFER_SIZE;
-	DRM_INFO("virtio vbuffers: %d bufs, %zdB each, %dkB total.\n",
-		 count, VBUFFER_SIZE, size / 1024);
-
-	vgdev->vbufs = kzalloc(size, GFP_KERNEL);
+	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
+					 VBUFFER_SIZE,
+					 __alignof__(struct virtio_gpu_vbuffer),
+					 0, NULL);
 	if (!vgdev->vbufs)
 		return -ENOMEM;
-
-	for (i = 0, ptr = vgdev->vbufs;
-	     i < count;
-	     i++, ptr += VBUFFER_SIZE) {
-		vbuf = ptr;
-		list_add(&vbuf->list, &vgdev->free_vbufs);
-	}
 	return 0;
 }
 
 void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
 {
-	struct virtio_gpu_vbuffer *vbuf;
-	int i, count = 0;
-
-	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
-	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
-
-	spin_lock(&vgdev->free_vbufs_lock);
-	for (i = 0; i < count; i++) {
-		if (WARN_ON(list_empty(&vgdev->free_vbufs))) {
-			spin_unlock(&vgdev->free_vbufs_lock);
-			return;
-		}
-		vbuf = list_first_entry(&vgdev->free_vbufs,
-					struct virtio_gpu_vbuffer, list);
-		list_del(&vbuf->list);
-	}
-	spin_unlock(&vgdev->free_vbufs_lock);
-	kfree(vgdev->vbufs);
+	kmem_cache_destroy(vgdev->vbufs);
+	vgdev->vbufs = NULL;
}
 
 static struct virtio_gpu_vbuffer*
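The hunk above drops the boot-time array of vbuffers threaded onto a spinlock-protected freelist and replaces it with a dedicated slab cache: create the cache once with the object size and alignment, allocate and free vbuffers individually, and destroy the cache at teardown (by which point every object must already be freed). For reference, a minimal self-contained sketch of that kmem_cache lifecycle, using hypothetical names (demo_cache, struct demo_obj) rather than the driver's:

#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical stand-in for struct virtio_gpu_vbuffer. */
struct demo_obj {
	int id;
	char payload[64];
};

static struct kmem_cache *demo_cache;

/* Per-object alloc/free, replacing a hand-rolled freelist. */
static struct demo_obj *demo_get(void)
{
	return kmem_cache_zalloc(demo_cache, GFP_KERNEL); /* NULL on failure */
}

static void demo_put(struct demo_obj *obj)
{
	kmem_cache_free(demo_cache, obj);
}

static int __init demo_init(void)
{
	struct demo_obj *obj;

	demo_cache = kmem_cache_create("demo-objs", sizeof(struct demo_obj),
				       __alignof__(struct demo_obj),
				       0 /* flags */, NULL /* ctor */);
	if (!demo_cache)
		return -ENOMEM;

	/* Smoke test: one alloc/free round trip through the cache. */
	obj = demo_get();
	if (!obj) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}
	obj->id = 1;
	demo_put(obj);
	return 0;
}

static void __exit demo_exit(void)
{
	/* All objects must have been returned before the cache goes away. */
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The trade-off relative to the removed code: the old pool of 16 + vring-size buffers was preallocated, so obtaining a vbuf could never fail at runtime; with the cache, each allocation can fail under memory pressure and callers have to cope, as the next hunk shows.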
@@ -128,12 +96,9 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 {
 	struct virtio_gpu_vbuffer *vbuf;
 
-	spin_lock(&vgdev->free_vbufs_lock);
-	BUG_ON(list_empty(&vgdev->free_vbufs));
-	vbuf = list_first_entry(&vgdev->free_vbufs,
-				struct virtio_gpu_vbuffer, list);
-	list_del(&vbuf->list);
-	spin_unlock(&vgdev->free_vbufs_lock);
+	vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
+	if (!vbuf)
+		return ERR_PTR(-ENOMEM);
 	memset(vbuf, 0, VBUFFER_SIZE);
 
 	BUG_ON(size > MAX_INLINE_CMD_SIZE);
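One slab-API detail behind the error path in this hunk: kmem_cache_alloc() reports failure by returning NULL; it never returns an ERR_PTR()-encoded pointer, so IS_ERR() on its result can never fire. The NULL check therefore re-encodes the failure as ERR_PTR(-ENOMEM) so callers can detect it with IS_ERR(). In pattern form (fragment with a hypothetical helper name, assuming linux/slab.h and linux/err.h):

	/* kmem_cache_alloc() yields NULL on failure, never an ERR_PTR;
	 * re-encode so callers can use IS_ERR()/PTR_ERR() uniformly. */
	static void *demo_cache_get(struct kmem_cache *cache)
	{
		void *obj = kmem_cache_alloc(cache, GFP_KERNEL);

		return obj ? obj : ERR_PTR(-ENOMEM);
	}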
@@ -208,9 +173,7 @@ static void free_vbuf(struct virtio_gpu_device *vgdev,
 	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
 		kfree(vbuf->resp_buf);
 	kfree(vbuf->data_buf);
-	spin_lock(&vgdev->free_vbufs_lock);
-	list_add(&vbuf->list, &vgdev->free_vbufs);
-	spin_unlock(&vgdev->free_vbufs_lock);
+	kmem_cache_free(vgdev->vbufs, vbuf);
 }
 
 static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
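Taken together, the three hunks remove the driver's private buffer pool and its free_vbufs_lock entirely: allocation, freeing, and bookkeeping all move into the slab allocator. Beyond less code, this buys the usual kmem_cache benefits: per-CPU caching instead of a single contended spinlock, and slab debugging coverage for these buffers (object poisoning, plus a warning from kmem_cache_destroy() in virtio_gpu_free_vbufs() if any vbuf is still outstanding at teardown). The cost is that vbuf allocation is no longer guaranteed to succeed, which the error handling in virtio_gpu_get_vbuf() now has to reflect.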