/*
 * Copyright 2011 Red Hat, Inc.
 * Copyright © 2014 The Chromium OS Authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software")
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Adam Jackson <ajax@redhat.com>
 *    Ben Widawsky <ben@bwidawsk.net>
 */

/**
 * This is vgem, a (non-hardware-backed) GEM service. This is used by Mesa's
 * software renderer and the X server for efficient buffer sharing.
 */
  31. #include <linux/module.h>
  32. #include <linux/ramfs.h>
  33. #include <linux/shmem_fs.h>
  34. #include <linux/dma-buf.h>
  35. #include "vgem_drv.h"
  36. #define DRIVER_NAME "vgem"
  37. #define DRIVER_DESC "Virtual GEM provider"
  38. #define DRIVER_DATE "20120112"
  39. #define DRIVER_MAJOR 1
  40. #define DRIVER_MINOR 0
/* Single global device instance: the DRM device embedded alongside the
 * dummy platform device used as the DMA-capable device for dma-buf
 * import/export (see vgem_prime_import()).
 */
static struct vgem_device {
        struct drm_device drm;
        struct platform_device *platform;
} *vgem_device;
  45. static void vgem_gem_free_object(struct drm_gem_object *obj)
  46. {
  47. struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
  48. kvfree(vgem_obj->pages);
  49. mutex_destroy(&vgem_obj->pages_lock);
  50. if (obj->import_attach)
  51. drm_prime_gem_destroy(obj, vgem_obj->table);
  52. drm_gem_object_release(obj);
  53. kfree(vgem_obj);
  54. }
/* Page-fault handler for mmap'ed vgem objects: serve faults from the
 * pinned page array when available, otherwise fault the page in from
 * the shmem backing store.
 */
static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_vgem_gem_object *obj = vma->vm_private_data;
        /* We don't use vmf->pgoff since that has the fake offset */
        unsigned long vaddr = vmf->address;
        vm_fault_t ret = VM_FAULT_SIGBUS;
        loff_t num_pages;
        pgoff_t page_offset;

        page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
        num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);

        /* Out-of-range fault (should not happen for a valid mapping). */
        if (page_offset >= num_pages)
                return VM_FAULT_SIGBUS;

        /* Fast path: pages already pinned (exported/vmapped) — hand back
         * the cached page with an extra reference, under pages_lock.
         */
        mutex_lock(&obj->pages_lock);
        if (obj->pages) {
                get_page(obj->pages[page_offset]);
                vmf->page = obj->pages[page_offset];
                ret = 0;
        }
        mutex_unlock(&obj->pages_lock);
        if (ret) {
                struct page *page;

                /* Slow path: read the page from shmem; on success it is
                 * returned with a reference held for vmf->page.
                 */
                page = shmem_read_mapping_page(
                                        file_inode(obj->base.filp)->i_mapping,
                                        page_offset);
                if (!IS_ERR(page)) {
                        vmf->page = page;
                        ret = 0;
                } else switch (PTR_ERR(page)) {
                        case -ENOSPC:
                        case -ENOMEM:
                                ret = VM_FAULT_OOM;
                                break;
                        case -EBUSY:
                                /* NOTE(review): returning VM_FAULT_RETRY
                                 * without honouring FAULT_FLAG_ALLOW_RETRY
                                 * looks questionable — confirm against the
                                 * mm fault contract.
                                 */
                                ret = VM_FAULT_RETRY;
                                break;
                        case -EFAULT:
                        case -EINVAL:
                                ret = VM_FAULT_SIGBUS;
                                break;
                        default:
                                WARN_ON(PTR_ERR(page));
                                ret = VM_FAULT_SIGBUS;
                                break;
                }
        }
        return ret;
}
/* VMA operations for GEM mmaps: faults are served by vgem_gem_fault();
 * open/close use the stock GEM VMA reference helpers.
 */
static const struct vm_operations_struct vgem_gem_vm_ops = {
        .fault = vgem_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};
  108. static int vgem_open(struct drm_device *dev, struct drm_file *file)
  109. {
  110. struct vgem_file *vfile;
  111. int ret;
  112. vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
  113. if (!vfile)
  114. return -ENOMEM;
  115. file->driver_priv = vfile;
  116. ret = vgem_fence_open(vfile);
  117. if (ret) {
  118. kfree(vfile);
  119. return ret;
  120. }
  121. return 0;
  122. }
  123. static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
  124. {
  125. struct vgem_file *vfile = file->driver_priv;
  126. vgem_fence_close(vfile);
  127. kfree(vfile);
  128. }
  129. static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
  130. unsigned long size)
  131. {
  132. struct drm_vgem_gem_object *obj;
  133. int ret;
  134. obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  135. if (!obj)
  136. return ERR_PTR(-ENOMEM);
  137. ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
  138. if (ret) {
  139. kfree(obj);
  140. return ERR_PTR(ret);
  141. }
  142. mutex_init(&obj->pages_lock);
  143. return obj;
  144. }
/* Undo __vgem_gem_create() for an object that never gained a userspace
 * handle or extra references (error-path unwind only).
 */
static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
{
        drm_gem_object_release(&obj->base);
        kfree(obj);
}
  150. static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
  151. struct drm_file *file,
  152. unsigned int *handle,
  153. unsigned long size)
  154. {
  155. struct drm_vgem_gem_object *obj;
  156. int ret;
  157. obj = __vgem_gem_create(dev, size);
  158. if (IS_ERR(obj))
  159. return ERR_CAST(obj);
  160. ret = drm_gem_handle_create(file, &obj->base, handle);
  161. drm_gem_object_put_unlocked(&obj->base);
  162. if (ret)
  163. goto err;
  164. return &obj->base;
  165. err:
  166. __vgem_gem_destroy(obj);
  167. return ERR_PTR(ret);
  168. }
  169. static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
  170. struct drm_mode_create_dumb *args)
  171. {
  172. struct drm_gem_object *gem_object;
  173. u64 pitch, size;
  174. pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
  175. size = args->height * pitch;
  176. if (size == 0)
  177. return -EINVAL;
  178. gem_object = vgem_gem_create(dev, file, &args->handle, size);
  179. if (IS_ERR(gem_object))
  180. return PTR_ERR(gem_object);
  181. args->size = gem_object->size;
  182. args->pitch = pitch;
  183. DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
  184. return 0;
  185. }
  186. static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
  187. uint32_t handle, uint64_t *offset)
  188. {
  189. struct drm_gem_object *obj;
  190. int ret;
  191. obj = drm_gem_object_lookup(file, handle);
  192. if (!obj)
  193. return -ENOENT;
  194. if (!obj->filp) {
  195. ret = -EINVAL;
  196. goto unref;
  197. }
  198. ret = drm_gem_create_mmap_offset(obj);
  199. if (ret)
  200. goto unref;
  201. *offset = drm_vma_node_offset_addr(&obj->vma_node);
  202. unref:
  203. drm_gem_object_put_unlocked(obj);
  204. return ret;
  205. }
/* Driver-private ioctls: attach and signal fences on vgem buffers. */
static struct drm_ioctl_desc vgem_ioctls[] = {
        DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
/* mmap entry point on the DRM fd: delegate to drm_gem_mmap(), then
 * restore the VMA flags saved beforehand.
 */
static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        unsigned long flags = vma->vm_flags;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret)
                return ret;

        /* Keep the WC mmaping set by drm_gem_mmap() but our pages
         * are ordinary and not special.
         */
        /* NOTE(review): assigning the saved flags discards any vm_flags
         * drm_gem_mmap() added — intentional per the comment above, so
         * faults go through vgem_gem_fault() on ordinary pages.
         */
        vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
}
/* File operations for the device node: all stock DRM implementations
 * except mmap, which adjusts VMA flags (see vgem_mmap()).
 */
static const struct file_operations vgem_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .mmap = vgem_mmap,
        .poll = drm_poll,
        .read = drm_read,
        .unlocked_ioctl = drm_ioctl,
        .compat_ioctl = drm_compat_ioctl,
        .release = drm_release,
};
/* Pin the object's backing pages, fetching them on the first pin.
 * pages_pin_count and bo->pages are guarded by pages_lock; only the
 * 0 -> 1 transition calls drm_gem_get_pages().
 * Returns the page array, or an ERR_PTR on failure.
 */
static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
{
        mutex_lock(&bo->pages_lock);
        if (bo->pages_pin_count++ == 0) {
                struct page **pages;

                pages = drm_gem_get_pages(&bo->base);
                if (IS_ERR(pages)) {
                        /* Roll back the optimistic pin on failure. */
                        bo->pages_pin_count--;
                        mutex_unlock(&bo->pages_lock);
                        return pages;
                }

                bo->pages = pages;
        }
        mutex_unlock(&bo->pages_lock);

        return bo->pages;
}
/* Drop one pin; the final unpin returns the pages to shmem (the two
 * 'true' arguments mark them dirty and accessed so contents persist).
 */
static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
{
        mutex_lock(&bo->pages_lock);
        if (--bo->pages_pin_count == 0) {
                drm_gem_put_pages(&bo->base, bo->pages, true, true);
                bo->pages = NULL;
        }
        mutex_unlock(&bo->pages_lock);
}
  258. static int vgem_prime_pin(struct drm_gem_object *obj)
  259. {
  260. struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
  261. long n_pages = obj->size >> PAGE_SHIFT;
  262. struct page **pages;
  263. pages = vgem_pin_pages(bo);
  264. if (IS_ERR(pages))
  265. return PTR_ERR(pages);
  266. /* Flush the object from the CPU cache so that importers can rely
  267. * on coherent indirect access via the exported dma-address.
  268. */
  269. drm_clflush_pages(pages, n_pages);
  270. return 0;
  271. }
/* PRIME unpin callback: balance vgem_prime_pin(). */
static void vgem_prime_unpin(struct drm_gem_object *obj)
{
        struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

        vgem_unpin_pages(bo);
}
/* Build a scatter/gather table from the cached page array for dma-buf
 * export.  Assumes the pages were pinned first (vgem_prime_pin()).
 */
static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

        return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
}
/* dma-buf import: delegate to the core helper, but attach against our
 * platform device (presumably because the vgem DRM device has no
 * DMA-capable parent of its own — confirm).
 */
static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
                                                struct dma_buf *dma_buf)
{
        struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

        return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
}
/* Wrap an imported sg_table in a new vgem object.  The page array is
 * filled from the sg list and the object is permanently pinned, since
 * this side does not control the exporter's page lifetime.
 */
static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
                struct dma_buf_attachment *attach, struct sg_table *sg)
{
        struct drm_vgem_gem_object *obj;
        int npages;

        obj = __vgem_gem_create(dev, attach->dmabuf->size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;

        obj->table = sg;
        obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!obj->pages) {
                __vgem_gem_destroy(obj);
                return ERR_PTR(-ENOMEM);
        }

        obj->pages_pin_count++; /* perma-pinned */
        drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
                                         npages);
        return &obj->base;
}
/* Kernel-virtual mapping of the whole object, write-combined to match
 * the WC userspace mappings.  Pins the pages; vgem_prime_vunmap()
 * drops the pin.  Returns NULL on failure (vmap contract).
 */
static void *vgem_prime_vmap(struct drm_gem_object *obj)
{
        struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
        long n_pages = obj->size >> PAGE_SHIFT;
        struct page **pages;

        pages = vgem_pin_pages(bo);
        if (IS_ERR(pages))
                return NULL;

        return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
}
/* Undo vgem_prime_vmap(): drop the kernel mapping, then the page pin. */
static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

        vunmap(vaddr);
        vgem_unpin_pages(bo);
}
/* mmap callback for dma-buf importers: delegate to the shmem backing
 * file's mmap, then point the VMA at that file so later faults and
 * accounting target the backing store rather than the dma-buf file.
 */
static int vgem_prime_mmap(struct drm_gem_object *obj,
                           struct vm_area_struct *vma)
{
        int ret;

        /* Refuse mappings larger than the object. */
        if (obj->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        /* Imported objects have no shmem file to map. */
        if (!obj->filp)
                return -ENODEV;

        ret = call_mmap(obj->filp, vma);
        if (ret)
                return ret;

        /* Swap the VMA's file reference over to the backing file. */
        fput(vma->vm_file);
        vma->vm_file = get_file(obj->filp);

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

        return 0;
}
/* drm_driver .release callback: runs when the last reference to the
 * DRM device is dropped; unregisters the paired platform device and
 * frees the containing vgem_device.
 */
static void vgem_release(struct drm_device *dev)
{
        struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

        platform_device_unregister(vgem->platform);
        drm_dev_fini(&vgem->drm);

        kfree(vgem);
}
/* Driver vtable: a pure GEM + PRIME software driver — no modesetting,
 * no hardware.  Dumb buffers, fence ioctls and full dma-buf
 * import/export are wired up above.
 */
static struct drm_driver vgem_driver = {
        .driver_features = DRIVER_GEM | DRIVER_PRIME,
        .release = vgem_release,
        .open = vgem_open,
        .postclose = vgem_postclose,
        .gem_free_object_unlocked = vgem_gem_free_object,
        .gem_vm_ops = &vgem_gem_vm_ops,
        .ioctls = vgem_ioctls,
        .num_ioctls = ARRAY_SIZE(vgem_ioctls),
        .fops = &vgem_driver_fops,

        .dumb_create = vgem_gem_dumb_create,
        .dumb_map_offset = vgem_gem_dumb_map,

        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_pin = vgem_prime_pin,
        .gem_prime_unpin = vgem_prime_unpin,
        .gem_prime_import = vgem_prime_import,
        .gem_prime_export = drm_gem_prime_export,
        .gem_prime_import_sg_table = vgem_prime_import_sg_table,
        .gem_prime_get_sg_table = vgem_prime_get_sg_table,
        .gem_prime_vmap = vgem_prime_vmap,
        .gem_prime_vunmap = vgem_prime_vunmap,
        .gem_prime_mmap = vgem_prime_mmap,

        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
};
  377. static int __init vgem_init(void)
  378. {
  379. int ret;
  380. vgem_device = kzalloc(sizeof(*vgem_device), GFP_KERNEL);
  381. if (!vgem_device)
  382. return -ENOMEM;
  383. ret = drm_dev_init(&vgem_device->drm, &vgem_driver, NULL);
  384. if (ret)
  385. goto out_free;
  386. vgem_device->platform =
  387. platform_device_register_simple("vgem", -1, NULL, 0);
  388. if (IS_ERR(vgem_device->platform)) {
  389. ret = PTR_ERR(vgem_device->platform);
  390. goto out_fini;
  391. }
  392. dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
  393. DMA_BIT_MASK(64));
  394. /* Final step: expose the device/driver to userspace */
  395. ret = drm_dev_register(&vgem_device->drm, 0);
  396. if (ret)
  397. goto out_unregister;
  398. return 0;
  399. out_unregister:
  400. platform_device_unregister(vgem_device->platform);
  401. out_fini:
  402. drm_dev_fini(&vgem_device->drm);
  403. out_free:
  404. kfree(vgem_device);
  405. return ret;
  406. }
/* Module exit: unregister, then drop the final device reference.  The
 * actual freeing happens in vgem_release() via the .release hook.
 */
static void __exit vgem_exit(void)
{
        drm_dev_unregister(&vgem_device->drm);
        drm_dev_put(&vgem_device->drm);
}
/* Standard module registration and metadata. */
module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");