  1. /* exynos_drm_dmabuf.c
  2. *
  3. * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  4. * Author: Inki Dae <inki.dae@samsung.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License as published by the
  8. * Free Software Foundation; either version 2 of the License, or (at your
  9. * option) any later version.
  10. */
  11. #include <drm/drmP.h>
  12. #include <drm/exynos_drm.h>
  13. #include "exynos_drm_dmabuf.h"
  14. #include "exynos_drm_drv.h"
  15. #include "exynos_drm_gem.h"
  16. #include <linux/dma-buf.h>
/*
 * Per-attachment private data, stored in dma_buf_attachment->priv.
 * Tracks the importer-private copy of the buffer's scatter-gather table
 * and the direction it is currently DMA-mapped with.
 */
struct exynos_drm_dmabuf_attachment {
	struct sg_table sgt;		/* cloned sg table handed to the importer */
	enum dma_data_direction dir;	/* current mapping direction; DMA_NONE if unmapped */
	bool is_mapped;			/* true once map_dma_buf populated sgt */
};
/* Resolve a dma-buf back to the exynos GEM object stashed in buf->priv. */
static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_exynos_gem_obj(buf->priv);
}
  26. static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
  27. struct device *dev,
  28. struct dma_buf_attachment *attach)
  29. {
  30. struct exynos_drm_dmabuf_attachment *exynos_attach;
  31. exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
  32. if (!exynos_attach)
  33. return -ENOMEM;
  34. exynos_attach->dir = DMA_NONE;
  35. attach->priv = exynos_attach;
  36. return 0;
  37. }
  38. static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
  39. struct dma_buf_attachment *attach)
  40. {
  41. struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
  42. struct sg_table *sgt;
  43. if (!exynos_attach)
  44. return;
  45. sgt = &exynos_attach->sgt;
  46. if (exynos_attach->dir != DMA_NONE)
  47. dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
  48. exynos_attach->dir);
  49. sg_free_table(sgt);
  50. kfree(exynos_attach);
  51. attach->priv = NULL;
  52. }
  53. static struct sg_table *
  54. exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
  55. enum dma_data_direction dir)
  56. {
  57. struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
  58. struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
  59. struct drm_device *dev = gem_obj->base.dev;
  60. struct exynos_drm_gem_buf *buf;
  61. struct scatterlist *rd, *wr;
  62. struct sg_table *sgt = NULL;
  63. unsigned int i;
  64. int nents, ret;
  65. /* just return current sgt if already requested. */
  66. if (exynos_attach->dir == dir && exynos_attach->is_mapped)
  67. return &exynos_attach->sgt;
  68. buf = gem_obj->buffer;
  69. if (!buf) {
  70. DRM_ERROR("buffer is null.\n");
  71. return ERR_PTR(-ENOMEM);
  72. }
  73. sgt = &exynos_attach->sgt;
  74. ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
  75. if (ret) {
  76. DRM_ERROR("failed to alloc sgt.\n");
  77. return ERR_PTR(-ENOMEM);
  78. }
  79. mutex_lock(&dev->struct_mutex);
  80. rd = buf->sgt->sgl;
  81. wr = sgt->sgl;
  82. for (i = 0; i < sgt->orig_nents; ++i) {
  83. sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
  84. rd = sg_next(rd);
  85. wr = sg_next(wr);
  86. }
  87. if (dir != DMA_NONE) {
  88. nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
  89. if (!nents) {
  90. DRM_ERROR("failed to map sgl with iommu.\n");
  91. sg_free_table(sgt);
  92. sgt = ERR_PTR(-EIO);
  93. goto err_unlock;
  94. }
  95. }
  96. exynos_attach->is_mapped = true;
  97. exynos_attach->dir = dir;
  98. attach->priv = exynos_attach;
  99. DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
  100. err_unlock:
  101. mutex_unlock(&dev->struct_mutex);
  102. return sgt;
  103. }
/*
 * dma_buf_ops.unmap_dma_buf callback. Intentionally empty: the mapping
 * is cached per-attachment and only released in
 * exynos_gem_detach_dma_buf(), so there is nothing to do per-unmap.
 */
static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
						struct sg_table *sgt,
						enum dma_data_direction dir)
{
	/* Nothing to do. */
}
/* dma_buf_ops.kmap_atomic callback: unimplemented, always returns NULL. */
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num)
{
	/* TODO */

	return NULL;
}
/* dma_buf_ops.kunmap_atomic callback: unimplemented no-op. */
static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num,
						void *addr)
{
	/* TODO */
}
/* dma_buf_ops.kmap callback: unimplemented, always returns NULL. */
static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	/* TODO */

	return NULL;
}
/* dma_buf_ops.kunmap callback: unimplemented no-op. */
static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
{
	/* TODO */
}
/*
 * dma_buf_ops.mmap callback: userspace mmap of the exported buffer is
 * not supported by this driver; always fails with -ENOTTY.
 */
static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
	struct vm_area_struct *vma)
{
	return -ENOTTY;
}
/*
 * dma-buf operations for buffers exported by this driver. release is
 * handled by the DRM core helper, which drops the GEM reference taken
 * at export time.
 */
static struct dma_buf_ops exynos_dmabuf_ops = {
	.attach			= exynos_gem_attach_dma_buf,
	.detach			= exynos_gem_detach_dma_buf,
	.map_dma_buf		= exynos_gem_map_dma_buf,
	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
	.kmap			= exynos_gem_dmabuf_kmap,
	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic,
	.kunmap			= exynos_gem_dmabuf_kunmap,
	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
	.mmap			= exynos_gem_dmabuf_mmap,
	.release		= drm_gem_dmabuf_release,
};
  150. struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
  151. struct drm_gem_object *obj, int flags)
  152. {
  153. struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
  154. DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
  155. exp_info.ops = &exynos_dmabuf_ops;
  156. exp_info.size = exynos_gem_obj->base.size;
  157. exp_info.flags = flags;
  158. exp_info.priv = obj;
  159. return dma_buf_export(&exp_info);
  160. }
/*
 * PRIME import: turn a dma-buf into a GEM object usable by this device.
 *
 * Self-import (a dma-buf this driver exported, from this same device) is
 * short-circuited: the existing GEM object is re-referenced and returned
 * instead of creating a wrapper. Otherwise the buffer is attached,
 * DMA-mapped bidirectionally, and wrapped in a fresh GEM object that
 * records the attachment for later teardown.
 *
 * Returns the GEM object, or an ERR_PTR on failure. On the error paths
 * every resource acquired so far (attachment, mapping, buffer struct,
 * dma-buf reference) is released in reverse order via the goto chain.
 */
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		obj = dma_buf->priv;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);

	/* Hold the dma-buf for the lifetime of the attachment. */
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	sgl = sgt->sgl;

	buffer->size = dma_buf->size;
	/*
	 * NOTE(review): only the first segment's DMA address is recorded —
	 * presumably callers rely on CONTIG/NONCONTIG flags below to know
	 * whether it covers the whole buffer.
	 */
	buffer->dma_addr = sg_dma_address(sgl);

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * sets NONCONTIG.
		 * TODO. we have to find a way that the exporter can notify
		 * the type of its own buffer to the importer.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	/* Remember the attachment so the GEM teardown path can detach. */
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}