@@ -37,7 +37,9 @@ void vkms_gem_free_object(struct drm_gem_object *obj)
 	struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
 						   gem);
 
-	kvfree(gem->pages);
+	WARN_ON(gem->pages);
+	WARN_ON(gem->vaddr);
+
 	mutex_destroy(&gem->pages_lock);
 	drm_gem_object_release(obj);
 	kfree(gem);
@@ -177,3 +179,78 @@ unref:
 
 	return ret;
 }
+
+static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
+{
+	struct drm_gem_object *gem_obj = &vkms_obj->gem;
+
+	if (!vkms_obj->pages) {
+		struct page **pages = drm_gem_get_pages(gem_obj);
+
+		if (IS_ERR(pages))
+			return pages;
+
+		if (cmpxchg(&vkms_obj->pages, NULL, pages))
+			drm_gem_put_pages(gem_obj, pages, false, true);
+	}
+
+	return vkms_obj->pages;
+}
+
+void vkms_gem_vunmap(struct drm_gem_object *obj)
+{
+	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+
+	mutex_lock(&vkms_obj->pages_lock);
+	if (vkms_obj->vmap_count < 1) {
+		WARN_ON(vkms_obj->vaddr);
+		WARN_ON(vkms_obj->pages);
+		mutex_unlock(&vkms_obj->pages_lock);
+		return;
+	}
+
+	vkms_obj->vmap_count--;
+
+	if (vkms_obj->vmap_count == 0) {
+		vunmap(vkms_obj->vaddr);
+		vkms_obj->vaddr = NULL;
+		drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+		vkms_obj->pages = NULL;
+	}
+
+	mutex_unlock(&vkms_obj->pages_lock);
+}
+
+int vkms_gem_vmap(struct drm_gem_object *obj)
+{
+	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+	int ret = 0;
+
+	mutex_lock(&vkms_obj->pages_lock);
+
+	if (!vkms_obj->vaddr) {
+		unsigned int n_pages = obj->size >> PAGE_SHIFT;
+		struct page **pages = _get_pages(vkms_obj);
+
+		if (IS_ERR(pages)) {
+			ret = PTR_ERR(pages);
+			goto out;
+		}
+
+		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
+		if (!vkms_obj->vaddr)
+			goto err_vmap;
+
+		vkms_obj->vmap_count++;
+	}
+
+	goto out;
+
+err_vmap:
+	ret = -ENOMEM;
+	drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+	vkms_obj->pages = NULL;
+out:
+	mutex_unlock(&vkms_obj->pages_lock);
+	return ret;
+}
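
For context (not part of the patch above): a minimal sketch of how a caller such
as the CRC worker might pair the two new helpers. The function name
vkms_crc_of_gem() and the crc32 step are illustrative assumptions, not code from
this series; only vkms_gem_vmap()/vkms_gem_vunmap() and the vaddr/size fields
come from the patch.

#include <linux/crc32.h>

#include "vkms_drv.h"	/* vkms_gem_vmap(), vkms_gem_vunmap(), drm_gem_to_vkms_gem() */

/* Hypothetical consumer: checksum a GEM object's contents. */
static u32 vkms_crc_of_gem(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
	u32 crc;
	int ret;

	/* First vmap pins the backing pages and builds the kernel mapping. */
	ret = vkms_gem_vmap(obj);
	if (ret)
		return 0;

	/* vaddr stays valid until the matching vunmap below. */
	crc = crc32_le(0, vkms_obj->vaddr, obj->size);

	/* Drops vmap_count; the mapping and pages are released at zero. */
	vkms_gem_vunmap(obj);

	return crc;
}

Design note on _get_pages(): the cmpxchg() lets two racing initializers both
call drm_gem_get_pages(); whichever caller loses the swap simply returns its
duplicate page array via drm_gem_put_pages(), so no extra locking is needed for
the lazy setup.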