/* gem.c — extracted from a source viewer; fused line-number residue removed */
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  15. #include <linux/dma-buf.h>
  16. #include <drm/tegra_drm.h>
  17. #include "drm.h"
  18. #include "gem.h"
  19. static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
  20. {
  21. return container_of(bo, struct tegra_bo, base);
  22. }
  23. static void tegra_bo_put(struct host1x_bo *bo)
  24. {
  25. struct tegra_bo *obj = host1x_to_tegra_bo(bo);
  26. struct drm_device *drm = obj->gem.dev;
  27. mutex_lock(&drm->struct_mutex);
  28. drm_gem_object_unreference(&obj->gem);
  29. mutex_unlock(&drm->struct_mutex);
  30. }
  31. static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
  32. {
  33. struct tegra_bo *obj = host1x_to_tegra_bo(bo);
  34. return obj->paddr;
  35. }
/* host1x unpin callback: tegra_bo_pin() takes no resources, so no-op. */
static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}
  39. static void *tegra_bo_mmap(struct host1x_bo *bo)
  40. {
  41. struct tegra_bo *obj = host1x_to_tegra_bo(bo);
  42. return obj->vaddr;
  43. }
/* host1x munmap callback: the mapping is permanent, nothing to undo. */
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}
  47. static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
  48. {
  49. struct tegra_bo *obj = host1x_to_tegra_bo(bo);
  50. return obj->vaddr + page * PAGE_SIZE;
  51. }
/* host1x kunmap callback: per-page mappings are permanent, no-op. */
static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}
  56. static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
  57. {
  58. struct tegra_bo *obj = host1x_to_tegra_bo(bo);
  59. struct drm_device *drm = obj->gem.dev;
  60. mutex_lock(&drm->struct_mutex);
  61. drm_gem_object_reference(&obj->gem);
  62. mutex_unlock(&drm->struct_mutex);
  63. return bo;
  64. }
/* host1x buffer-object operations implemented on top of GEM. */
static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};
  75. static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
  76. size_t size)
  77. {
  78. struct tegra_bo *bo;
  79. int err;
  80. bo = kzalloc(sizeof(*bo), GFP_KERNEL);
  81. if (!bo)
  82. return ERR_PTR(-ENOMEM);
  83. host1x_bo_init(&bo->base, &tegra_bo_ops);
  84. size = round_up(size, PAGE_SIZE);
  85. err = drm_gem_object_init(drm, &bo->gem, size);
  86. if (err < 0)
  87. goto free;
  88. err = drm_gem_create_mmap_offset(&bo->gem);
  89. if (err < 0)
  90. goto release;
  91. return bo;
  92. release:
  93. drm_gem_object_release(&bo->gem);
  94. free:
  95. kfree(bo);
  96. return ERR_PTR(err);
  97. }
/*
 * Free the contiguous write-combined backing memory of @bo.  Note that
 * the size passed is bo->gem.size (page-rounded in
 * tegra_bo_alloc_object()).
 */
static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
{
	dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
}
  102. struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
  103. unsigned long flags)
  104. {
  105. struct tegra_bo *bo;
  106. int err;
  107. bo = tegra_bo_alloc_object(drm, size);
  108. if (IS_ERR(bo))
  109. return bo;
  110. bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
  111. GFP_KERNEL | __GFP_NOWARN);
  112. if (!bo->vaddr) {
  113. dev_err(drm->dev, "failed to allocate buffer with size %u\n",
  114. size);
  115. err = -ENOMEM;
  116. goto err_dma;
  117. }
  118. if (flags & DRM_TEGRA_GEM_CREATE_TILED)
  119. bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
  120. if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
  121. bo->flags |= TEGRA_BO_BOTTOM_UP;
  122. return bo;
  123. err_dma:
  124. kfree(bo);
  125. return ERR_PTR(err);
  126. }
  127. struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
  128. struct drm_device *drm,
  129. unsigned int size,
  130. unsigned long flags,
  131. unsigned int *handle)
  132. {
  133. struct tegra_bo *bo;
  134. int err;
  135. bo = tegra_bo_create(drm, size, flags);
  136. if (IS_ERR(bo))
  137. return bo;
  138. err = drm_gem_handle_create(file, &bo->gem, handle);
  139. if (err) {
  140. tegra_bo_free_object(&bo->gem);
  141. return ERR_PTR(err);
  142. }
  143. drm_gem_object_unreference_unlocked(&bo->gem);
  144. return bo;
  145. }
/*
 * Import a foreign dma-buf as a tegra_bo (PRIME import path).
 *
 * The hardware is programmed with a single physical address, so only
 * single-entry (physically contiguous) scatter lists are accepted.  On
 * success the object holds the attachment plus a reference on @buf;
 * both are released again in tegra_bo_free_object().
 */
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	/* keep the dma-buf alive for the lifetime of the attachment */
	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	/* non-contiguous buffers cannot be programmed into the hardware */
	if (bo->sgt->nents > 1) {
		err = -EINVAL;
		goto detach;
	}

	bo->paddr = sg_dma_address(bo->sgt->sgl);
	bo->gem.import_attach = attach;

	return bo;

detach:
	/* bo->sgt may be NULL or an ERR_PTR at this point, hence the guard */
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
  187. void tegra_bo_free_object(struct drm_gem_object *gem)
  188. {
  189. struct tegra_bo *bo = to_tegra_bo(gem);
  190. if (gem->import_attach) {
  191. dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
  192. DMA_TO_DEVICE);
  193. drm_prime_gem_destroy(gem, NULL);
  194. } else {
  195. tegra_bo_destroy(gem->dev, bo);
  196. }
  197. drm_gem_object_release(gem);
  198. kfree(bo);
  199. }
  200. int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
  201. struct drm_mode_create_dumb *args)
  202. {
  203. int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
  204. struct tegra_drm *tegra = drm->dev_private;
  205. struct tegra_bo *bo;
  206. min_pitch = round_up(min_pitch, tegra->pitch_align);
  207. if (args->pitch < min_pitch)
  208. args->pitch = min_pitch;
  209. if (args->size < args->pitch * args->height)
  210. args->size = args->pitch * args->height;
  211. bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
  212. &args->handle);
  213. if (IS_ERR(bo))
  214. return PTR_ERR(bo);
  215. return 0;
  216. }
  217. int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
  218. uint32_t handle, uint64_t *offset)
  219. {
  220. struct drm_gem_object *gem;
  221. struct tegra_bo *bo;
  222. mutex_lock(&drm->struct_mutex);
  223. gem = drm_gem_object_lookup(drm, file, handle);
  224. if (!gem) {
  225. dev_err(drm->dev, "failed to lookup GEM object\n");
  226. mutex_unlock(&drm->struct_mutex);
  227. return -EINVAL;
  228. }
  229. bo = to_tegra_bo(gem);
  230. *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
  231. drm_gem_object_unreference(gem);
  232. mutex_unlock(&drm->struct_mutex);
  233. return 0;
  234. }
/* VMA callbacks for mmap'ed GEM buffers; GEM core handles refcounting. */
const struct vm_operations_struct tegra_bo_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
/*
 * mmap() a GEM buffer into userspace.
 *
 * drm_gem_mmap() looks the object up via its fake offset and prepares
 * the VMA; the mapping itself is then established through
 * dma_mmap_writecombine(), which requires vm_pgoff == 0, so the offset
 * is cleared around the call and restored afterwards for the VMA
 * bookkeeping.
 */
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* remember the fake offset so it can be restored below */
	unsigned long vm_pgoff = vma->vm_pgoff;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	/* drm_gem_mmap() stored the GEM object in vm_private_data */
	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	/* clear the flag and offset set up by drm_gem_mmap() before remapping */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr, bo->paddr,
				    gem->size);
	if (ret) {
		/* undo the reference and VMA setup done by drm_gem_mmap() */
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_pgoff = vm_pgoff;

	return 0;
}
  261. static struct sg_table *
  262. tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
  263. enum dma_data_direction dir)
  264. {
  265. struct drm_gem_object *gem = attach->dmabuf->priv;
  266. struct tegra_bo *bo = to_tegra_bo(gem);
  267. struct sg_table *sgt;
  268. sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
  269. if (!sgt)
  270. return NULL;
  271. if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
  272. kfree(sgt);
  273. return NULL;
  274. }
  275. sg_dma_address(sgt->sgl) = bo->paddr;
  276. sg_dma_len(sgt->sgl) = gem->size;
  277. return sgt;
  278. }
/* Release a scatter table created by tegra_gem_prime_map_dma_buf(). */
static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	sg_free_table(sgt);
	kfree(sgt);
}
/* dma-buf release callback: drop the GEM reference held by the export. */
static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}
/* Atomic per-page kernel mapping of the dma-buf is not supported. */
static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}
/* No-op: kmap_atomic never hands out a mapping. */
static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page,
					  void *addr)
{
}
/* Per-page kernel mapping of the dma-buf is not supported. */
static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}
/* No-op: kmap never hands out a mapping. */
static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}
/* Userspace mmap of the exported dma-buf is not supported. */
static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}
  312. static void *tegra_gem_prime_vmap(struct dma_buf *buf)
  313. {
  314. struct drm_gem_object *gem = buf->priv;
  315. struct tegra_bo *bo = to_tegra_bo(gem);
  316. return bo->vaddr;
  317. }
/* No-op: the vmap handed out above is permanent. */
static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}
/* dma-buf operations for buffers exported via PRIME. */
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};
/*
 * Export a GEM object as a dma-buf (PRIME export).
 *
 * NOTE(review): five-argument dma_buf_export() — the trailing NULL is
 * presumably the reservation object for this kernel version; confirm
 * against the local dma-buf API.
 */
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
			      flags, NULL);
}
  340. struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
  341. struct dma_buf *buf)
  342. {
  343. struct tegra_bo *bo;
  344. if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
  345. struct drm_gem_object *gem = buf->priv;
  346. if (gem->dev == drm) {
  347. drm_gem_object_reference(gem);
  348. return gem;
  349. }
  350. }
  351. bo = tegra_bo_import(drm, buf);
  352. if (IS_ERR(bo))
  353. return ERR_CAST(bo);
  354. return &bo->gem;
  355. }