xen_drm_front_gem.c

// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include "xen_drm_front_gem.h"

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <xen/balloon.h>

#include "xen_drm_front.h"
#include "xen_drm_front_shbuf.h"

struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* this is for imported PRIME buffer */
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

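/*
 * Allocate only the array of struct page pointers; the pages themselves
 * come later, either from the balloon driver or from drm_gem_get_pages().
 */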
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

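/*
 * Allocate a xen_gem_object and initialize its base GEM object;
 * no backing storage is attached at this point.
 */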
static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

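/*
 * Create a GEM object of @size bytes, rounded up to the page size.
 * If the backend is configured to allocate display buffers (cfg.be_alloc),
 * only ballooned pages are allocated here, to be used later for mapping
 * grant references provided by the backend. Otherwise backing pages are
 * allocated now, so they can be shared with the backend.
 */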
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = alloc_xenballooned_pages(xen_obj->num_pages,
					       xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}
	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR_OR_NULL(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

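/* Driver-facing entry point for GEM buffer creation. */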
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}

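/*
 * Release a GEM object: imported PRIME buffers are detached from their
 * dma-buf, backend-allocated buffers return their ballooned pages, and
 * locally allocated buffers put their shmem-backed pages back.
 */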
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				free_xenballooned_pages(xen_obj->num_pages,
							xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

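/* Export the buffer's backing pages as a scatter-gather table for PRIME. */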
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return NULL;

	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
}

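/*
 * Import a PRIME buffer: collect its pages from the scatter-gather table
 * and ask the backend to create a display buffer backed by them.
 */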
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
					       NULL, xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->nents);

	return &xen_obj->base;
}

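/*
 * Common mmap helper shared by the GEM and PRIME mmap paths: inserts all
 * of the buffer's pages into the VMA with write-combine page protection.
 */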
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
			struct vm_area_struct *vma)
{
	unsigned long addr = vma->vm_start;
	int i;

	/*
	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_pgoff = 0;
	vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	/*
	 * The vm_operations_struct.fault handler would normally be called on
	 * the first CPU access to the mapping. The GPU does not touch this
	 * memory via the CPU, so insert all pages now to keep both CPU and
	 * GPU accesses working.
	 * FIXME: as all pages are inserted here, no .fault handler must be
	 * called, so do not provide one.
	 */
	for (i = 0; i < xen_obj->num_pages; i++) {
		int ret;

		ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
		if (ret < 0) {
			DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
			return ret;
		}

		addr += PAGE_SIZE;
	}
	return 0;
}

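/* mmap callback for the DRM file: map a GEM object into user space. */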
int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret < 0)
		return ret;

	gem_obj = vma->vm_private_data;
	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}

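/* Map the buffer's pages into kernel virtual address space, write-combined. */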
void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return NULL;

	return vmap(xen_obj->pages, xen_obj->num_pages,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    void *vaddr)
{
	vunmap(vaddr);
}

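/* PRIME mmap path: reuses the same mapping helper as the GEM mmap above. */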
int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
				 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	int ret;

	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
	if (ret < 0)
		return ret;

	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}