/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>
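
/*
 * Per-buffer state. The same structure backs all three buffer types:
 * MMAP buffers own a vmalloc_user() area (vaddr), USERPTR buffers hold
 * either a pinned page array (pages/n_pages) or a remapped PFNMAP region
 * (vma), and imported DMABUF buffers keep a reference to the dma_buf (dbuf).
 */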
struct vb2_vmalloc_buf {
	void *vaddr;
	struct page **pages;
	struct vm_area_struct *vma;
	enum dma_data_direction dma_dir;
	unsigned long size;
	unsigned int n_pages;
	atomic_t refcount;
	struct vb2_vmarea_handler handler;
	struct dma_buf *dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
			       enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return NULL;

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	if (!buf->vaddr) {
		pr_debug("vmalloc of size %lu failed\n", buf->size);
		kfree(buf);
		return NULL;
	}

	atomic_inc(&buf->refcount);
	return buf;
}

static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (atomic_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}
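
/*
 * USERPTR buffers are handled in one of two ways: memory belonging to a
 * PFNMAP VMA (typically I/O or reserved memory mapped by another driver)
 * is resolved to a contiguous physical range and mapped with
 * ioremap_nocache(), while ordinary anonymous or file-backed memory is
 * pinned page by page with get_user_pages() and given a kernel mapping
 * via vm_map_ram().
 */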
static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	unsigned long first, last;
	int n_pages, offset;
	struct vm_area_struct *vma;
	dma_addr_t physp;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	vma = find_vma(current->mm, vaddr);
	if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
		if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
			goto fail_pages_array_alloc;
		buf->vma = vma;
		buf->vaddr = (__force void *)ioremap_nocache(physp, size);
		if (!buf->vaddr)
			goto fail_pages_array_alloc;
	} else {
		first = vaddr >> PAGE_SHIFT;
		last = (vaddr + size - 1) >> PAGE_SHIFT;
		buf->n_pages = last - first + 1;
		buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
				     GFP_KERNEL);
		if (!buf->pages)
			goto fail_pages_array_alloc;

		/* current->mm->mmap_sem is taken by videobuf2 core */
		n_pages = get_user_pages(current, current->mm,
					 vaddr & PAGE_MASK, buf->n_pages,
					 dma_dir == DMA_FROM_DEVICE,
					 1, /* force */
					 buf->pages, NULL);
		if (n_pages != buf->n_pages)
			goto fail_get_user_pages;

		buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
					PAGE_KERNEL);
		if (!buf->vaddr)
			goto fail_get_user_pages;
	}

	buf->vaddr += offset;
	return buf;

fail_get_user_pages:
	pr_debug("get_user_pages requested/got: %d/%d\n", n_pages,
		 buf->n_pages);
	while (--n_pages >= 0)
		put_page(buf->pages[n_pages]);
	kfree(buf->pages);

fail_pages_array_alloc:
	kfree(buf);

	return NULL;
}

static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;

	if (buf->pages) {
		if (vaddr)
			vm_unmap_ram((void *)vaddr, buf->n_pages);
		for (i = 0; i < buf->n_pages; ++i) {
			if (buf->dma_dir == DMA_FROM_DEVICE)
				set_page_dirty_lock(buf->pages[i]);
			put_page(buf->pages[i]);
		}
		kfree(buf->pages);
	} else {
		vb2_put_vma(buf->vma);
		iounmap((__force void __iomem *)buf->vaddr);
	}
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};
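
/*
 * The exporter side builds a page-based scatterlist for each attachment:
 * every page of the vmalloc area is looked up with vmalloc_to_page() at
 * attach time, and the sg_table is DMA-mapped for the attached device when
 * the importer calls map_dma_buf. The mapping is cached per attachment and
 * only redone when the requested DMA direction changes.
 */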
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			     attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			     attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.kmap = vb2_vmalloc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
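
/*
 * On the importer side, attach_dmabuf only records the dma_buf handle and
 * the requested size/direction; the kernel mapping is created lazily in
 * map_dmabuf via dma_buf_vmap() and torn down again in unmap_dmabuf with
 * dma_buf_vunmap().
 */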
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}

const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc = vb2_vmalloc_alloc,
	.put = vb2_vmalloc_put,
	.get_userptr = vb2_vmalloc_get_userptr,
	.put_userptr = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf = vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf = vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf = vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf = vb2_vmalloc_detach_dmabuf,
	.vaddr = vb2_vmalloc_vaddr,
	.mmap = vb2_vmalloc_mmap,
	.num_users = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
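
/*
 * Usage sketch (illustrative, not part of this file): a driver selects this
 * allocator by pointing its vb2_queue at vb2_vmalloc_memops before calling
 * vb2_queue_init(). The setup below is a minimal example; the field values
 * and the hypothetical my_dev/my_buffer/my_vb2_ops names are assumptions
 * made for illustration only.
 *
 *	struct vb2_queue *q = &my_dev->queue;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->drv_priv = my_dev;
 *	q->buf_struct_size = sizeof(struct my_buffer);
 *	q->ops = &my_vb2_ops;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	ret = vb2_queue_init(q);
 */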

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");