/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/drm_vma_manager.h>

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
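
/*
 * rockchip_gem_iommu_map - reserve an I/O virtual address range from the
 * driver's drm_mm allocator and map the object's scatter-gather table into
 * it through the shared IOMMU domain.
 */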
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
			   rk_obj->sgt->nents, prot);
	if (ret < rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}
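
/*
 * rockchip_gem_iommu_unmap - tear down the IOMMU mapping and return the
 * I/O virtual address range to the drm_mm allocator.
 */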
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return 0;
}
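
/*
 * rockchip_gem_get_pages - back the object with shmem pages and build a
 * scatter-gather table describing them.
 */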
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}
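
/*
 * rockchip_gem_alloc_iommu - allocate backing pages and map them through
 * the IOMMU; optionally create a write-combined kernel mapping as well.
 */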
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}
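
/*
 * rockchip_gem_alloc_dma - allocate a physically contiguous, write-combined
 * buffer via the DMA API; the kernel mapping is skipped unless alloc_kmap
 * is set.
 */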
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
		return -ENOMEM;
	}

	return 0;
}
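
/*
 * rockchip_gem_alloc_buf - pick the allocation backend: IOMMU-mapped pages
 * when a domain is attached, contiguous DMA memory otherwise.
 */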
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}
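
/*
 * rockchip_drm_gem_object_mmap_iommu - insert the object's individual pages
 * into the user VMA with vm_insert_page(), validating the requested range
 * against the buffer size.
 */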
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int i, count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);
	unsigned long uaddr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	unsigned long end = user_count + offset;
	int ret;

	if (user_count == 0)
		return -ENXIO;
	if (end > count)
		return -ENXIO;

	for (i = offset; i < end; i++) {
		ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
		if (ret)
			return ret;
		uaddr += PAGE_SIZE;
	}

	return 0;
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * We allocated a struct page table for rk_obj, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
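
/*
 * rockchip_gem_mmap_buf - set up a userspace mapping directly on @obj,
 * without the fake-offset lookup performed by rockchip_gem_mmap().
 */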
int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	obj = vma->vm_private_data;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	drm_gem_object_init(drm, obj, size);

	return rk_obj;
}

struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
				     rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
		}
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * Returns a struct rockchip_gem_object * on success or an ERR_PTR()
 * value on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	rk_obj = rockchip_gem_create_object(drm, size, false);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an IDR entry for the object; the resulting handle is the
	 * ID that userspace uses to refer to it.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this into your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * Align to 64 bytes since Mali requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}
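
/*
 * rockchip_sg_get_contiguous_size - return the length of the run of
 * DMA-contiguous entries at the start of the sg table.
 */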
static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
						     int count)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, count, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}

	return size;
}

static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
			       DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
			     DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}
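
/*
 * rockchip_gem_prime_import_sg_table - wrap an imported sg table in a new
 * GEM object: the table is either mapped through the IOMMU or verified to
 * be DMA-contiguous.
 */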
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}
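
/*
 * rockchip_gem_prime_vmap - return a kernel mapping of the buffer: vmap()
 * the pages of IOMMU-backed objects, or reuse the existing DMA API mapping
 * (NULL if the allocation has no kernel mapping).
 */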
void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages)
		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return NULL;

	return rk_obj->kvaddr;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}