/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
        struct device *dev;
        void *vaddr;
        unsigned long size;
        void *cookie;
        dma_addr_t dma_addr;
        unsigned long attrs;
        enum dma_data_direction dma_dir;
        struct sg_table *dma_sgt;
        struct frame_vector *vec;

        /* MMAP related */
        struct vb2_vmarea_handler handler;
        atomic_t refcount;
        struct sg_table *sgt_base;

        /* DMABUF related */
        struct dma_buf_attachment *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}
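/*
 * Worked example (added for illustration, not part of the original file):
 * for DMA segments {0x1000 + 4 KiB, 0x2000 + 4 KiB, 0x4000 + 4 KiB} the
 * walk above stops at the third segment (expected address 0x3000, actual
 * 0x4000) and returns 8 KiB, the length of the leading contiguous run.
 */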
/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}
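/*
 * Sketch of how a driver consumes this cookie (illustrative; it relies on
 * the vb2_dma_contig_plane_dma_addr() helper from
 * <media/videobuf2-dma-contig.h>, which simply dereferences the cookie;
 * REG_DMA_ADDR and priv are hypothetical names):
 *
 *      dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 *      writel(addr, priv->base + REG_DMA_ADDR);
 */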
static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!buf->vaddr && buf->db_attach)
                buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
                               buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}
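/*
 * Note the pairing (added commentary): for USERPTR buffers,
 * vb2_dc_get_userptr() below maps with DMA_ATTR_SKIP_CPU_SYNC, so cache
 * ownership moves only through these two callbacks. Simplified sequence as
 * driven by the vb2 core:
 *
 *      vb2_dc_prepare(buf);    // CPU -> device, before DMA starts
 *      ... hardware DMA ...
 *      vb2_dc_finish(buf);     // device -> CPU, before userspace dequeues
 */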
/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!atomic_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
                       buf->attrs);
        put_device(buf->dev);
        kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
                          unsigned long size, enum dma_data_direction dma_dir,
                          gfp_t gfp_flags)
{
        struct vb2_dc_buf *buf;

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        if (attrs)
                buf->attrs = attrs;
        buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
                                      GFP_KERNEL | gfp_flags, buf->attrs);
        if (!buf->cookie) {
                dev_err(dev, "dma_alloc_attrs of size %ld failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
                buf->vaddr = buf->cookie;

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;
        buf->dma_dir = dma_dir;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        return buf;
}
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        /*
         * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
         * map the whole buffer.
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
                             buf->dma_addr, buf->size, buf->attrs);
        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
                 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                 buf->size);

        return 0;
}
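/*
 * For illustration (not part of the original file): after VIDIOC_QUERYBUF
 * fills in buf.m.offset, userspace maps the whole buffer in one call, and
 * the vm_pgoff reset above is what makes the per-buffer offset scheme
 * compatible with dma_mmap_attrs():
 *
 *      void *p = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, buf.m.offset);
 */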
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /* Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}
static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                             attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}
static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                             attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}
static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .kmap = vb2_dc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
                                    buf->size, buf->attrs);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}
static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_dc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}
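/*
 * Illustrative userspace path (added commentary): VIDIOC_EXPBUF reaches
 * this exporter through the vb2 core and hands the application a dma-buf
 * file descriptor:
 *
 *      struct v4l2_exportbuffer exp = {
 *              .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,    // assuming a capture queue
 *              .index = i,
 *      };
 *      ioctl(fd, VIDIOC_EXPBUF, &exp);                 // exp.fd is the dma-buf fd
 */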
/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
        int i;
        struct page **pages;

        if (sgt) {
                /*
                 * No need to sync to CPU, it's already synced to the CPU
                 * since the finish() memop will have been called before this.
                 */
                dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
                pages = frame_vector_pages(buf->vec);
                /* sgt should exist only if vector contains pages... */
                BUG_ON(IS_ERR(pages));
                for (i = 0; i < frame_vector_count(buf->vec); i++)
                        set_page_dirty_lock(pages[i]);
                sg_free_table(sgt);
                kfree(sgt);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}
/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert the
 * pfn to a dma address, or as a last resort just assume that
 * dma address == physical address (as was assumed in earlier versions
 * of videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        /* really, we cannot do anything better at this point */
        return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif
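/*
 * Worked example of the last-resort fallback (added for illustration): with
 * 4 KiB pages (PAGE_SHIFT == 12), pfn 0x80000 yields dma address 0x80000000,
 * i.e. the dma address equals the physical address.
 */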
static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_buf *buf;
        struct frame_vector *vec;
        unsigned long offset;
        int n_pages, i;
        int ret = 0;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();

        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = dev;
        buf->dma_dir = dma_dir;

        offset = vaddr & ~PAGE_MASK;
        vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_buf;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        ret = frame_vector_to_pages(vec);
        if (ret < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * Failed to convert to pages... Check that the memory is
                 * physically contiguous and use direct mapping.
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_pfnvec;
                buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
                goto out;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_pfnvec;
        }

        ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                       contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
out:
        buf->size = size;

        return buf;

fail_map_sg:
        dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                           buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_pfnvec:
        vb2_destroy_framevec(vec);

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}
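/*
 * Illustrative userspace counterpart (added commentary, not from the
 * original file): a USERPTR application queues a cache-line-aligned buffer,
 * which arrives here through the vb2 core:
 *
 *      struct v4l2_buffer b = {0};
 *      b.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;   // assuming a capture queue
 *      b.memory = V4L2_MEMORY_USERPTR;
 *      b.index = i;
 *      b.m.userptr = (unsigned long)ptr;       // e.g. from posix_memalign()
 *      b.length = size;
 *      ioctl(fd, VIDIOC_QBUF, &b);
 */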
/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non-attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* checking if dmabuf is big enough to store contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu b\n",
                       contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
        buf->vaddr = NULL;

        return 0;
}
static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a non-attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach a mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}
static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = dma_dir;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}
/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc = vb2_dc_alloc,
        .put = vb2_dc_put,
        .get_dmabuf = vb2_dc_get_dmabuf,
        .cookie = vb2_dc_cookie,
        .vaddr = vb2_dc_vaddr,
        .mmap = vb2_dc_mmap,
        .get_userptr = vb2_dc_get_userptr,
        .put_userptr = vb2_dc_put_userptr,
        .prepare = vb2_dc_prepare,
        .finish = vb2_dc_finish,
        .map_dmabuf = vb2_dc_map_dmabuf,
        .unmap_dmabuf = vb2_dc_unmap_dmabuf,
        .attach_dmabuf = vb2_dc_attach_dmabuf,
        .detach_dmabuf = vb2_dc_detach_dmabuf,
        .num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
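/*
 * Typical driver wiring (sketch with illustrative names; q and pdev are
 * assumed locals): a V4L2 driver selects this allocator while setting up
 * its vb2 queue, usually in probe():
 *
 *      q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *      q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *      q->mem_ops = &vb2_dma_contig_memops;
 *      q->dev = &pdev->dev;
 *      ret = vb2_queue_init(q);
 */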
/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev: device for configuring DMA parameters
 * @size: size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers that are known to operate
 * on platforms with an IOMMU and that provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
        if (!dev->dma_parms) {
                dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
                if (!dev->dma_parms)
                        return -ENOMEM;
        }
        if (dma_get_max_seg_size(dev) < size)
                return dma_set_max_seg_size(dev, size);

        return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
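/*
 * Sketch of the expected call site (illustrative): a driver on an
 * IOMMU-backed platform raises the limit from its probe() before
 * vb2_queue_init(), commonly passing DMA_BIT_MASK(32) as an effectively
 * unlimited segment size:
 *
 *      ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *      if (ret)
 *              return ret;
 */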
/*
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev: device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
        kfree(dev->dma_parms);
        dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
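/*
 * Matching teardown (illustrative): the same driver frees the dma_parms
 * allocation from its remove() callback:
 *
 *      vb2_dma_contig_clear_max_seg_size(&pdev->dev);
 */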
MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");