/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device		*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* USERPTR related */
	struct vm_area_struct		*vma;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/
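/*
 * Call @cb on every struct page backing @sgt. A single scatterlist entry
 * may cover more than one page, so each entry is walked page by page.
 */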
static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}
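/*
 * Return the size of the initial DMA-contiguous run of @sgt, i.e. how many
 * bytes are usable by a device that requires a single contiguous buffer.
 */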
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/
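/*
 * Drop one reference to an MMAP buffer; on the final put, free the exported
 * sg_table (if any), the coherent DMA memory and the buffer itself.
 */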
static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	put_device(buf->dev);
	kfree(buf);
}
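/*
 * Allocate a physically contiguous buffer with dma_alloc_coherent() and
 * take a reference on the allocating device for the buffer's lifetime.
 */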
static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
			  enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
					GFP_KERNEL | gfp_flags);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
				buf->dma_addr, buf->size);
	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/
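/*
 * Each importer gets its own copy of the exporter's base sg_table, since a
 * single sg_table cannot be mapped by several devices at the same time.
 */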
struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}
static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};
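/*
 * Build an sg_table describing the coherent allocation; it serves as the
 * template that each attachment copies in vb2_dc_dmabuf_ops_attach().
 */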
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
		buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}
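/*
 * For VM_IO/VM_PFNMAP mappings, verify that the @n_pages PFNs starting at
 * @start are physically consecutive and return the first PFN in @res.
 */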
static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
	struct vm_area_struct *vma, unsigned long *res)
{
	unsigned long pfn, start_pfn, prev_pfn;
	unsigned int i;
	int ret;

	if (!vma_is_io(vma))
		return -EFAULT;

	ret = follow_pfn(vma, start, &pfn);
	if (ret)
		return ret;

	start_pfn = pfn;
	start += PAGE_SIZE;

	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
		prev_pfn = pfn;
		ret = follow_pfn(vma, start, &pfn);

		if (ret) {
			pr_err("no page for address %lu\n", start);
			return ret;
		}
		if (pfn != prev_pfn + 1)
			return -EINVAL;
	}

	*res = start_pfn;
	return 0;
}

static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma,
	enum dma_data_direction dma_dir)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			/* check ret first: pfn is undefined on failure */
			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			if (!pfn_valid(pfn))
				return -EINVAL;
			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (sgt) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, &attrs);
		if (!vma_is_io(buf->vma))
			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_put_vma(buf->vma);
	kfree(buf);
}

/*
 * For some kinds of reserved memory there may be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert the
 * pfn to a dma address or, as a last resort, assume that
 * dma address == physical address (as earlier versions of
 * videobuf2-dma-contig did).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif
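/*
 * Pin a userspace buffer and map it for DMA. The common path pins the pages
 * and builds an sg_table; for PFN-mapped memory without struct pages the
 * code falls back to vb2_dc_get_user_pfn() plus vb2_dc_pfn_to_dma().
 */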
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir);
	if (ret) {
		unsigned long pfn;
		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
			buf->size = size;
			kfree(pages);
			return buf;
		}

		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, &attrs);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
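/*
 * Map an imported DMABUF for DMA and verify that the resulting mapping is
 * contiguous and large enough for the requested buffer size.
 */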
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	if (!IS_ERR_OR_NULL(alloc_ctx))
		kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
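/*
 * Typical driver usage, as a minimal sketch (error handling trimmed; the
 * "my_dev" structure is hypothetical, not part of this file's API):
 *
 *	my_dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *	if (IS_ERR(my_dev->alloc_ctx))
 *		return PTR_ERR(my_dev->alloc_ctx);
 *
 *	q->mem_ops = &vb2_dma_contig_memops;
 *
 *	then, in the driver's queue_setup() vb2 operation:
 *		alloc_ctxs[0] = my_dev->alloc_ctx;
 *
 *	and on teardown:
 *		vb2_dma_contig_cleanup_ctx(my_dev->alloc_ctx);
 */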