gem.c

/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

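/*
 * host1x_bo_ops callbacks: these let the host1x bus code take and drop
 * references on Tegra GEM objects and pin/map them for job submission.
 */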
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put_unlocked(&obj->gem);
}

static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	*sgt = obj->sgt;

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr;
	else if (obj->gem.import_attach)
		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
	else
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr + page * PAGE_SIZE;
	else if (obj->gem.import_attach)
		return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
	else
		return vmap(obj->pages + page, 1, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};

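/*
 * Reserve a range of I/O virtual addresses from the shared Tegra DRM
 * address space and map the buffer's scatter/gather list into the IOMMU
 * domain at that address.
 */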
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->paddr = bo->mm->start;

	bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
				bo->sgt->nents, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

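/*
 * Allocate the tegra_bo wrapper and initialize the underlying GEM object,
 * including its fake mmap offset in the DRM address space.
 */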
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);

	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			     DMA_BIDIRECTIONAL);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
	}
}

static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_BIDIRECTIONAL);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

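/*
 * Back a buffer object with memory: with an IOMMU domain available,
 * allocate individual pages and map them through the IOMMU; otherwise
 * fall back to a physically contiguous, write-combined DMA allocation.
 */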
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put_unlocked(&bo->gem);

	return bo;
}

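/*
 * Wrap a foreign dma-buf in a Tegra buffer object: attach to the buffer
 * and map its scatter/gather list. Without an IOMMU the imported buffer
 * must map to a single contiguous chunk.
 */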
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

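/*
 * Page fault handler for page-backed (IOMMU) buffers mapped into user
 * space: look up the page covering the faulting address and insert it into
 * the VMA. Contiguous buffers are mapped up front and never fault.
 */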
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

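/*
 * dma-buf export callbacks: build a scatter/gather table for importers,
 * keep CPU caches coherent around CPU access, and allow the exported
 * buffer to be mapped into user space.
 */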
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				    DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				       DMA_TO_DEVICE);

	return 0;
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.map = tegra_gem_prime_kmap,
	.unmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

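/*
 * PRIME export/import entry points. Importing a dma-buf that was exported
 * by this driver on the same device short-circuits to the original GEM
 * object instead of creating a new import.
 */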
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = drm->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(drm, &exp_info);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}