videobuf2-dma-contig.c

/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction	dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}
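/*
 * Worked example (illustrative, not part of the original file): for a mapped
 * sg table whose DMA entries are {addr 0x1000, len 0x1000}, {addr 0x2000,
 * len 0x1000}, {addr 0x8000, len 0x1000}, the first two entries are
 * contiguous but the third is not, so vb2_dc_get_contiguous_size() stops at
 * the gap and returns 0x2000.
 */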
/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (attrs)
		buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
				      GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
			     buf->dma_addr, buf->size, buf->attrs);
	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
				    struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
				     struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			     attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			     attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
				  struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		for (i = 0; i < frame_vector_count(buf->vec); i++)
			set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert the
 * pfn to a DMA address or, as a last resort, simply assume that
 * dma address == physical address (as was assumed in earlier versions of
 * videobuf2-dma-contig).
 */
#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif
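/*
 * Worked example (illustrative, not part of the original file): with 4 KiB
 * pages (PAGE_SHIFT == 12), the last-resort fallback above turns pfn 0x80000
 * into the DMA address 0x80000 << 12 == 0x80000000, i.e. it simply assumes
 * that the DMA address equals the physical address.
 */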
static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned long offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = vaddr & ~PAGE_MASK;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
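/*
 * Usage sketch (not part of the original file; driver-side names such as
 * my_dev and my_vb2_queue_ops are hypothetical): a driver selects this
 * allocator by pointing its vb2 queue at vb2_dma_contig_memops before
 * calling vb2_queue_init(), roughly:
 *
 *	struct vb2_queue *q = &my_dev->queue;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->dev = &pdev->dev;		// device that performs the DMA
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	q->ops = &my_vb2_queue_ops;	// driver's vb2_ops
 *	ret = vb2_queue_init(q);
 *
 * From then on the vb2 core invokes the vb2_dc_* callbacks above for
 * allocation, mmap, USERPTR and DMABUF handling.
 */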
/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from the drivers, which are known to
 * operate on platforms with IOMMU and provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
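/*
 * Usage sketch (not part of the original file): a driver that relies on an
 * IOMMU to provide contiguous DMA mappings would typically raise the max
 * segment size at probe time, before setting up its vb2 queue, e.g.:
 *
 *	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 *
 * DMA_BIT_MASK(32) is just a conventional "large enough" value; any size
 * bigger than the largest buffer the driver will ever map works as well.
 */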
/*
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev:	device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
	kfree(dev->dma_parms);
	dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
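/*
 * Usage sketch (not part of the original file; my_driver_remove is a
 * hypothetical name): the matching cleanup belongs in the driver's remove
 * path, e.g.:
 *
 *	static int my_driver_remove(struct platform_device *pdev)
 *	{
 *		...
 *		vb2_dma_contig_clear_max_seg_size(&pdev->dev);
 *		return 0;
 *	}
 */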
MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");