/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

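/*
 * host1x_bo_ops implementation: these callbacks let the host1x bus code
 * reference, pin/unpin and map Tegra GEM buffer objects without knowing
 * anything about DRM.
 */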
static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
        return container_of(bo, struct tegra_bo, base);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        drm_gem_object_unreference_unlocked(&obj->gem);
}

static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        *sgt = obj->sgt;

        return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        if (obj->vaddr)
                return obj->vaddr;
        else if (obj->gem.import_attach)
                return dma_buf_vmap(obj->gem.import_attach->dmabuf);
        else
                return vmap(obj->pages, obj->num_pages, VM_MAP,
                            pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        if (obj->vaddr)
                return;
        else if (obj->gem.import_attach)
                dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
        else
                vunmap(addr);
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        if (obj->vaddr)
                return obj->vaddr + page * PAGE_SIZE;
        else if (obj->gem.import_attach)
                return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
        else
                return vmap(obj->pages + page, 1, VM_MAP,
                            pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
                            void *addr)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        if (obj->vaddr)
                return;
        else if (obj->gem.import_attach)
                dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
        else
                vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        drm_gem_object_reference(&obj->gem);

        return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
        .get = tegra_bo_get,
        .put = tegra_bo_put,
        .pin = tegra_bo_pin,
        .unpin = tegra_bo_unpin,
        .mmap = tegra_bo_mmap,
        .munmap = tegra_bo_munmap,
        .kmap = tegra_bo_kmap,
        .kunmap = tegra_bo_kunmap,
};

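/*
 * Reserve a range of I/O virtual addresses for the buffer and map its
 * scatter-gather list into the Tegra IOMMU domain. On success, bo->paddr
 * holds the IOVA and bo->size the number of bytes actually mapped.
 */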
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
        int prot = IOMMU_READ | IOMMU_WRITE;
        ssize_t err;

        if (bo->mm)
                return -EBUSY;

        bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
        if (!bo->mm)
                return -ENOMEM;

        mutex_lock(&tegra->mm_lock);

        err = drm_mm_insert_node_generic(&tegra->mm,
                                         bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
        if (err < 0) {
                dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
                        err);
                goto unlock;
        }

        bo->paddr = bo->mm->start;

        err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
                           bo->sgt->nents, prot);
        if (err < 0) {
                dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
                goto remove;
        }

        bo->size = err;

        mutex_unlock(&tegra->mm_lock);

        return 0;

remove:
        drm_mm_remove_node(bo->mm);
unlock:
        mutex_unlock(&tegra->mm_lock);
        kfree(bo->mm);
        return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
        if (!bo->mm)
                return 0;

        mutex_lock(&tegra->mm_lock);
        iommu_unmap(tegra->domain, bo->paddr, bo->size);
        drm_mm_remove_node(bo->mm);
        mutex_unlock(&tegra->mm_lock);

        kfree(bo->mm);

        return 0;
}

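/*
 * Allocate and initialize the bare GEM object (host1x ops, size rounded up
 * to a page multiple, mmap offset) without any backing storage yet.
 */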
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
                                              size_t size)
{
        struct tegra_bo *bo;
        int err;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        host1x_bo_init(&bo->base, &tegra_bo_ops);
        size = round_up(size, PAGE_SIZE);

        err = drm_gem_object_init(drm, &bo->gem, size);
        if (err < 0)
                goto free;

        err = drm_gem_create_mmap_offset(&bo->gem);
        if (err < 0)
                goto release;

        return bo;

release:
        drm_gem_object_release(&bo->gem);
free:
        kfree(bo);
        return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
        if (bo->pages) {
                drm_gem_put_pages(&bo->gem, bo->pages, true, true);
                sg_free_table(bo->sgt);
                kfree(bo->sgt);
        } else if (bo->vaddr) {
                dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
        }
}

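/*
 * Back the object with shmem pages and build a scatter-gather table for
 * them. Used when buffers are mapped through the IOMMU rather than taken
 * from the contiguous DMA allocator.
 */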
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
        struct scatterlist *s;
        unsigned int i;

        bo->pages = drm_gem_get_pages(&bo->gem);
        if (IS_ERR(bo->pages))
                return PTR_ERR(bo->pages);

        bo->num_pages = bo->gem.size >> PAGE_SHIFT;

        bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
        if (IS_ERR(bo->sgt))
                goto put_pages;

        /*
         * Fake up the SG table so that dma_sync_sg_for_device() can be used
         * to flush the pages associated with it.
         *
         * TODO: Replace this by drm_clflush_sg() once it can be implemented
         * without relying on symbols that are not exported.
         */
        for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
                sg_dma_address(s) = sg_phys(s);

        dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
                               DMA_TO_DEVICE);

        return 0;

put_pages:
        drm_gem_put_pages(&bo->gem, bo->pages, false, false);
        return PTR_ERR(bo->sgt);
}

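/*
 * Allocate backing storage: shmem pages mapped through the IOMMU when a
 * domain is available, otherwise a contiguous write-combined DMA buffer.
 */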
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        if (tegra->domain) {
                err = tegra_bo_get_pages(drm, bo);
                if (err < 0)
                        return err;

                err = tegra_bo_iommu_map(tegra, bo);
                if (err < 0) {
                        tegra_bo_free(drm, bo);
                        return err;
                }
        } else {
                size_t size = bo->gem.size;

                bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
                                         GFP_KERNEL | __GFP_NOWARN);
                if (!bo->vaddr) {
                        dev_err(drm->dev,
                                "failed to allocate buffer of size %zu\n",
                                size);
                        return -ENOMEM;
                }
        }

        return 0;
}

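/* Create a buffer object and apply the layout flags requested by userspace. */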
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
                                 unsigned long flags)
{
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_alloc_object(drm, size);
        if (IS_ERR(bo))
                return bo;

        err = tegra_bo_alloc(drm, bo);
        if (err < 0)
                goto release;

        if (flags & DRM_TEGRA_GEM_CREATE_TILED)
                bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

        if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
                bo->flags |= TEGRA_BO_BOTTOM_UP;

        return bo;

release:
        drm_gem_object_release(&bo->gem);
        kfree(bo);
        return ERR_PTR(err);
}

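/*
 * Create a buffer object along with a userspace handle for it. The handle
 * owns the reference, so the local reference returned by tegra_bo_create()
 * is dropped before returning.
 */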
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
                                             struct drm_device *drm,
                                             size_t size,
                                             unsigned long flags,
                                             u32 *handle)
{
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_create(drm, size, flags);
        if (IS_ERR(bo))
                return bo;

        err = drm_gem_handle_create(file, &bo->gem, handle);
        if (err) {
                tegra_bo_free_object(&bo->gem);
                return ERR_PTR(err);
        }

        drm_gem_object_unreference_unlocked(&bo->gem);

        return bo;
}

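/*
 * Import a foreign dma-buf: attach to it, map it for DMA and, when an IOMMU
 * domain is available, map the resulting sg-table into that domain. Without
 * an IOMMU only single-entry (physically contiguous) buffers are accepted.
 */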
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
                                        struct dma_buf *buf)
{
        struct tegra_drm *tegra = drm->dev_private;
        struct dma_buf_attachment *attach;
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_alloc_object(drm, buf->size);
        if (IS_ERR(bo))
                return bo;

        attach = dma_buf_attach(buf, drm->dev);
        if (IS_ERR(attach)) {
                err = PTR_ERR(attach);
                goto free;
        }

        get_dma_buf(buf);

        bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
        if (IS_ERR(bo->sgt)) {
                err = PTR_ERR(bo->sgt);
                goto detach;
        }

        if (tegra->domain) {
                err = tegra_bo_iommu_map(tegra, bo);
                if (err < 0)
                        goto detach;
        } else {
                if (bo->sgt->nents > 1) {
                        err = -EINVAL;
                        goto detach;
                }

                bo->paddr = sg_dma_address(bo->sgt->sgl);
        }

        bo->gem.import_attach = attach;

        return bo;

detach:
        if (!IS_ERR_OR_NULL(bo->sgt))
                dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

        dma_buf_detach(buf, attach);
        dma_buf_put(buf);
free:
        drm_gem_object_release(&bo->gem);
        kfree(bo);
        return ERR_PTR(err);
}

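/* Release all resources attached to a GEM object once its last reference is dropped. */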
void tegra_bo_free_object(struct drm_gem_object *gem)
{
        struct tegra_drm *tegra = gem->dev->dev_private;
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (tegra->domain)
                tegra_bo_iommu_unmap(tegra, bo);

        if (gem->import_attach) {
                dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
                                         DMA_TO_DEVICE);
                drm_prime_gem_destroy(gem, NULL);
        } else {
                tegra_bo_free(gem->dev, bo);
        }

        drm_gem_object_release(gem);
        kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
                         struct drm_mode_create_dumb *args)
{
        unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct tegra_drm *tegra = drm->dev_private;
        struct tegra_bo *bo;

        args->pitch = round_up(min_pitch, tegra->pitch_align);
        args->size = args->pitch * args->height;

        bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
                                         &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        return 0;
}

int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
                             u32 handle, u64 *offset)
{
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, handle);
        if (!gem) {
                dev_err(drm->dev, "failed to lookup GEM object\n");
                return -EINVAL;
        }

        bo = to_tegra_bo(gem);

        *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

        drm_gem_object_unreference_unlocked(gem);

        return 0;
}

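/*
 * Fault handler for mmap of shmem-backed objects: insert the faulting page
 * into the VMA. Contiguous buffers are mapped up front in tegra_drm_mmap()
 * and therefore never fault.
 */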
static int tegra_bo_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *gem = vma->vm_private_data;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct page *page;
        pgoff_t offset;
        int err;

        if (!bo->pages)
                return VM_FAULT_SIGBUS;

        offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
        page = bo->pages[offset];

        err = vm_insert_page(vma, vmf->address, page);
        switch (err) {
        case -EAGAIN:
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
                return VM_FAULT_NOPAGE;

        case -ENOMEM:
                return VM_FAULT_OOM;
        }

        return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
        .fault = tegra_bo_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

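/*
 * mmap() implementation: contiguous buffers are mapped directly via
 * dma_mmap_wc(), while shmem-backed buffers are faulted in page by page
 * with write-combined page protection.
 */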
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct drm_gem_object *gem;
        struct tegra_bo *bo;
        int ret;

        ret = drm_gem_mmap(file, vma);
        if (ret)
                return ret;

        gem = vma->vm_private_data;
        bo = to_tegra_bo(gem);

        if (!bo->pages) {
                unsigned long vm_pgoff = vma->vm_pgoff;

                vma->vm_flags &= ~VM_PFNMAP;
                vma->vm_pgoff = 0;

                ret = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
                                  gem->size);
                if (ret) {
                        drm_gem_vm_close(vma);
                        return ret;
                }

                vma->vm_pgoff = vm_pgoff;
        } else {
                pgprot_t prot = vm_get_page_prot(vma->vm_flags);

                vma->vm_flags |= VM_MIXEDMAP;
                vma->vm_flags &= ~VM_PFNMAP;

                vma->vm_page_prot = pgprot_writecombine(prot);
        }

        return 0;
}

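/*
 * PRIME/dma-buf export: build an sg-table for the importer, either from the
 * individual shmem pages or as a single contiguous entry.
 */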
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
                            enum dma_data_direction dir)
{
        struct drm_gem_object *gem = attach->dmabuf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        if (bo->pages) {
                struct scatterlist *sg;
                unsigned int i;

                if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
                        goto free;

                for_each_sg(sgt->sgl, sg, bo->num_pages, i)
                        sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

                if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
                        goto free;
        } else {
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
                        goto free;

                sg_dma_address(sgt->sgl) = bo->paddr;
                sg_dma_len(sgt->sgl) = gem->size;
        }

        return sgt;

free:
        sg_free_table(sgt);
        kfree(sgt);
        return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
                                          struct sg_table *sgt,
                                          enum dma_data_direction dir)
{
        struct drm_gem_object *gem = attach->dmabuf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (bo->pages)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

        sg_free_table(sgt);
        kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
        drm_gem_dmabuf_release(buf);
}

static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
                                         unsigned long page)
{
        return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
                                          unsigned long page,
                                          void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
        return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
                                   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
        return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
        struct drm_gem_object *gem = buf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);

        return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
        .map_dma_buf = tegra_gem_prime_map_dma_buf,
        .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
        .release = tegra_gem_prime_release,
        .map_atomic = tegra_gem_prime_kmap_atomic,
        .unmap_atomic = tegra_gem_prime_kunmap_atomic,
        .map = tegra_gem_prime_kmap,
        .unmap = tegra_gem_prime_kunmap,
        .mmap = tegra_gem_prime_mmap,
        .vmap = tegra_gem_prime_vmap,
        .vunmap = tegra_gem_prime_vunmap,
};

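/* Export a GEM object as a dma-buf using the ops above. */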
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
                                       struct drm_gem_object *gem,
                                       int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &tegra_gem_prime_dmabuf_ops;
        exp_info.size = gem->size;
        exp_info.flags = flags;
        exp_info.priv = gem;

        return drm_gem_dmabuf_export(drm, &exp_info);
}

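/*
 * Import a dma-buf. If the buffer was exported by this device, take a
 * reference to the underlying GEM object instead of creating a new one.
 */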
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
                                              struct dma_buf *buf)
{
        struct tegra_bo *bo;

        if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
                struct drm_gem_object *gem = buf->priv;

                if (gem->dev == drm) {
                        drm_gem_object_reference(gem);
                        return gem;
                }
        }

        bo = tegra_bo_import(drm, buf);
        if (IS_ERR(bo))
                return ERR_CAST(bo);

        return &bo->gem;
}