gem.c

/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}

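/*
 * Callbacks implementing the host1x buffer object interface on top of a
 * tegra_bo: reference counting, pinning for DMA and CPU mapping of the
 * backing memory. They are collected in tegra_bo_ops below.
 */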
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_unreference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);
}

static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr + page * PAGE_SIZE;
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_reference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};

static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
{
	dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
}

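/*
 * Allocate a page-aligned, write-combined, contiguous buffer, initialize the
 * embedded GEM object and set up its mmap offset. The DRM_TEGRA_GEM_CREATE_*
 * flags select a tiled layout and bottom-up scanout.
 */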
struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!bo->vaddr) {
		dev_err(drm->dev, "failed to allocate buffer with size %u\n",
			size);
		err = -ENOMEM;
		goto err_dma;
	}

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err)
		goto err_init;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err)
		goto err_mmap;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

err_mmap:
	drm_gem_object_release(&bo->gem);
err_init:
	tegra_bo_destroy(drm, bo);
err_dma:
	kfree(bo);

	return ERR_PTR(err);
}

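/*
 * Create a buffer object and a GEM handle for it in the given DRM file. The
 * handle holds the long-term reference; the creation reference is dropped
 * before returning.
 */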
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     unsigned int size,
					     unsigned long flags,
					     unsigned int *handle)
{
	struct tegra_bo *bo;
	int ret;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	ret = drm_gem_handle_create(file, &bo->gem, handle);
	if (ret)
		goto err;

	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;

err:
	tegra_bo_free_object(&bo->gem);
	return ERR_PTR(ret);
}

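/*
 * Wrap a foreign dma-buf in a tegra_bo. The buffer is attached and mapped
 * for DMA; only single-entry scatterlists (physically contiguous buffers)
 * are accepted, since the buffer is addressed through a single bo->paddr.
 */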
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	ssize_t size;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(buf->size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free_mmap;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (bo->sgt->nents > 1) {
		err = -EINVAL;
		goto detach;
	}

	bo->paddr = sg_dma_address(bo->sgt->sgl);
	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free_mmap:
	drm_gem_free_mmap_offset(&bo->gem);
release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);

	return ERR_PTR(err);
}

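/*
 * GEM free callback: for imported buffers, unmap and detach the dma-buf;
 * for natively allocated buffers, release the backing DMA memory.
 */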
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_destroy(gem->dev, bo);
	}

	drm_gem_free_mmap_offset(gem);
	drm_gem_object_release(gem);

	kfree(bo);
}

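/*
 * Dumb-buffer creation: round the pitch up to the driver's pitch alignment,
 * make sure the size covers the whole framebuffer, then allocate the buffer
 * and return a handle for it.
 */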
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	min_pitch = round_up(min_pitch, tegra->pitch_align);
	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

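/*
 * Dumb-buffer map-offset lookup: translate a GEM handle into the fake mmap
 * offset that userspace passes to mmap() on the DRM device node.
 */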
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	mutex_lock(&drm->struct_mutex);

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		mutex_unlock(&drm->struct_mutex);
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference(gem);

	mutex_unlock(&drm->struct_mutex);

	return 0;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

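/*
 * mmap() handler for the DRM device: let drm_gem_mmap() set up the VMA,
 * then map the buffer's contiguous physical pages into it with
 * remap_pfn_range().
 */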
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

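/*
 * dma-buf (PRIME) export callbacks: the exported scatterlist has a single
 * entry covering the contiguous buffer; per-page kmap access and dma-buf
 * mmap are not supported.
 */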
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
		kfree(sgt);
		return NULL;
	}

	sg_dma_address(sgt->sgl) = bo->paddr;
	sg_dma_len(sgt->sgl) = gem->size;

	return sgt;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page,
					  void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

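/* Export a GEM object as a dma-buf using the ops above. */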
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
			      flags, NULL);
}

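/*
 * Import a dma-buf as a GEM object. If the dma-buf was exported by this
 * driver for the same device, just take a reference on the existing GEM
 * object instead of wrapping it again.
 */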
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_reference(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}