gem.c

/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

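/*
 * struct tegra_bo embeds both a GEM object and a host1x_bo. The helpers
 * below implement the host1x_bo_ops interface on top of the GEM object so
 * that host1x clients can reference, pin and map Tegra DRM buffers.
 */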
static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_unreference_unlocked(&obj->gem);
}

static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr + page * PAGE_SIZE;
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_reference(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};

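/*
 * Reserve a node in the drm_mm IOVA allocator and map the buffer's
 * scatter-gather list into the IOMMU domain at that address. On success,
 * bo->paddr holds the I/O virtual address and bo->size the mapped length.
 */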
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
					 PAGE_SIZE, 0, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
			err);
		goto free;
	}

	bo->paddr = bo->mm->start;

	err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			   bo->sgt->nents, prot);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}

	bo->size = err;

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
free:
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	kfree(bo->mm);

	return 0;
}

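/*
 * Allocate the tegra_bo wrapper, initialize its host1x and GEM parts and
 * create the fake mmap offset that user space will pass to mmap(). No
 * backing storage is allocated here; that is left to the callers.
 */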
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
				      bo->paddr);
	}
}

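/*
 * Back the GEM object with shmem pages and build a scatter-gather table
 * for them. This path is used when an IOMMU is available, so the buffer
 * does not need to be physically contiguous.
 */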
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	struct scatterlist *s;
	unsigned int i;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt))
		goto put_pages;

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return PTR_ERR(bo->sgt);
}

static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
						   GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

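/*
 * Create a buffer object: allocate the wrapper, attach backing storage
 * (IOMMU-mapped shmem pages or contiguous DMA memory) and apply the
 * creation flags requested by user space.
 */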
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	/* drop the local reference; the handle now keeps the object alive */
	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;
}

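/*
 * Wrap a foreign dma-buf in a tegra_bo. With an IOMMU the imported
 * scatter-gather list is mapped into the domain; without one, only
 * single-entry (physically contiguous) buffers can be accepted.
 */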
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

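/*
 * Destructor for both native and imported buffer objects: undo the IOMMU
 * mapping, then release either the dma-buf attachment or the locally
 * allocated backing storage.
 */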
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     u32 handle, u64 *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}

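/*
 * Page-fault handler for shmem-backed buffers: insert the faulting page
 * into the VMA. Contiguous buffers are mapped up front in tegra_drm_mmap()
 * and should never fault, hence the SIGBUS.
 */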
static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;
	int err;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		 PAGE_SHIFT;
	page = bo->pages[offset];

	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (err) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

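/*
 * mmap() entry point: contiguous buffers are mapped in one go via
 * dma_mmap_writecombine(), while shmem-backed buffers are populated
 * lazily through tegra_bo_fault() and marked VM_MIXEDMAP here.
 */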
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;

		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
					    bo->paddr, gem->size);
		if (ret) {
			drm_gem_vm_close(vma);
			return ret;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

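/*
 * PRIME export path: build an SG table for the attached device, either
 * one entry per shmem page or a single entry covering the contiguous
 * allocation.
 */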
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page,
					  void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

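/*
 * Export a GEM object as a dma-buf. The import path below short-circuits
 * self-imports: a buffer that was exported by this device is simply
 * re-referenced instead of being attached through the dma-buf machinery.
 */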
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return dma_buf_export(&exp_info);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_reference(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}