/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

struct vb2_vmalloc_buf {
        void *vaddr;                            /* kernel mapping of the buffer */
        struct frame_vector *vec;               /* pinned user pages (USERPTR mode) */
        enum dma_data_direction dma_dir;
        unsigned long size;
        atomic_t refcount;
        struct vb2_vmarea_handler handler;      /* refcount helper for mmap() */
        struct dma_buf *dbuf;                   /* imported buffer (DMABUF mode) */
};

static void vb2_vmalloc_put(void *buf_priv);

static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
                               unsigned long size, enum dma_data_direction dma_dir,
                               gfp_t gfp_flags)
{
        struct vb2_vmalloc_buf *buf;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->size = size;
        buf->vaddr = vmalloc_user(buf->size);
        buf->dma_dir = dma_dir;
        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_vmalloc_put;
        buf->handler.arg = buf;

        if (!buf->vaddr) {
                pr_debug("vmalloc of size %ld failed\n", buf->size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        atomic_inc(&buf->refcount);
        return buf;
}

static void vb2_vmalloc_put(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (atomic_dec_and_test(&buf->refcount)) {
                vfree(buf->vaddr);
                kfree(buf);
        }
}

static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
                                     unsigned long size,
                                     enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;
        struct frame_vector *vec;
        int n_pages, offset, i;
        int ret = -ENOMEM;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dma_dir = dma_dir;
        offset = vaddr & ~PAGE_MASK;
        buf->size = size;
        vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_pfnvec_create;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        if (frame_vector_to_pages(vec) < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * We cannot get page pointers for these pfns. Check memory is
                 * physically contiguous and use direct mapping.
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i - 1] + 1 != nums[i])
                                goto fail_map;
                buf->vaddr = (__force void *)
                        ioremap_nocache(nums[0] << PAGE_SHIFT, size);
        } else {
                buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
                                        PAGE_KERNEL);
        }

        if (!buf->vaddr)
                goto fail_map;
        buf->vaddr += offset;
        return buf;

fail_map:
        vb2_destroy_framevec(vec);
fail_pfnvec_create:
        kfree(buf);
        return ERR_PTR(ret);
}

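/*
 * Worked example (illustrative, not from the original source): with 4 KiB
 * pages, a user pointer of 0x7f1234 gives offset = vaddr & ~PAGE_MASK =
 * 0x234. vm_map_ram() and ioremap_nocache() both return page-aligned
 * kernel addresses, so buf->vaddr is advanced by that offset above to make
 * it point at the first byte the user actually passed in.
 * vb2_vmalloc_put_userptr() masks the offset back off with PAGE_MASK
 * before unmapping.
 */
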
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
        unsigned int i;
        struct page **pages;
        unsigned int n_pages;

        if (!buf->vec->is_pfns) {
                n_pages = frame_vector_count(buf->vec);
                pages = frame_vector_pages(buf->vec);
                if (vaddr)
                        vm_unmap_ram((void *)vaddr, n_pages);
                if (buf->dma_dir == DMA_FROM_DEVICE)
                        for (i = 0; i < n_pages; i++)
                                set_page_dirty_lock(pages[i]);
        } else {
                iounmap((__force void __iomem *)buf->vaddr);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (!buf->vaddr) {
                pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
                return NULL;
        }

        return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                pr_err("No memory to map\n");
                return -EINVAL;
        }

        ret = remap_vmalloc_range(vma, buf->vaddr, 0);
        if (ret) {
                pr_err("Remapping vmalloc memory, error: %d\n", ret);
                return ret;
        }

        /*
         * Make sure that vm_areas for 2 buffers won't be merged together
         */
        vma->vm_flags |= VM_DONTEXPAND;

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

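/*
 * Refcount pairing sketch (see vb2_common_vm_ops in videobuf2-memops.c):
 * the explicit vm_ops->open() call above takes a reference on the buffer
 * through buf->handler for the new mapping; when the mapping is torn
 * down, the common close handler calls handler.put (vb2_vmalloc_put),
 * which drops the reference and frees the buffer once the count hits zero.
 */
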
#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
                                         struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_vmalloc_attachment *attach;
        struct vb2_vmalloc_buf *buf = dbuf->priv;
        int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
        struct sg_table *sgt;
        struct scatterlist *sg;
        void *vaddr = buf->vaddr;
        int ret;
        int i;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return ret;
        }
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *page = vmalloc_to_page(vaddr);

                if (!page) {
                        sg_free_table(sgt);
                        kfree(attach);
                        return -ENOMEM;
                }
                sg_set_page(sg, page, PAGE_SIZE, 0);
                vaddr += PAGE_SIZE;
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;
        return 0;
}

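/*
 * Note: vmalloc memory is virtually contiguous but in general physically
 * scattered, which is why the exporter above builds one scatterlist entry
 * per page via vmalloc_to_page() instead of a single contiguous chunk.
 */
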
static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
                                          struct dma_buf_attachment *db_attach)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                             attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                             attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
                                         struct sg_table *sgt,
                                         enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_vmalloc_get_dmabuf */
        vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
                                       struct vm_area_struct *vma)
{
        return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
        .attach = vb2_vmalloc_dmabuf_ops_attach,
        .detach = vb2_vmalloc_dmabuf_ops_detach,
        .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
        .kmap = vb2_vmalloc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
        .vmap = vb2_vmalloc_dmabuf_ops_vmap,
        .mmap = vb2_vmalloc_dmabuf_ops_mmap,
        .release = vb2_vmalloc_dmabuf_ops_release,
};

static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_vmalloc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (WARN_ON(!buf->vaddr))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}

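/*
 * Export sketch (illustrative, names assumed): userspace reaches this
 * callback through VIDIOC_EXPBUF; the vb2 core then wraps the returned
 * dma_buf in a file descriptor, roughly:
 *
 *	dbuf = vb2_vmalloc_get_dmabuf(vb->planes[plane].mem_priv, O_CLOEXEC);
 *	fd = dma_buf_fd(dbuf, O_CLOEXEC);
 *
 * 'vb' and 'plane' here are stand-ins for the vb2 core's bookkeeping.
 */
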
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        buf->vaddr = dma_buf_vmap(buf->dbuf);

        return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        dma_buf_vunmap(buf->dbuf, buf->vaddr);
        buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        if (buf->vaddr)
                dma_buf_vunmap(buf->dbuf, buf->vaddr);

        kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
                                       unsigned long size,
                                       enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dbuf = dbuf;
        buf->dma_dir = dma_dir;
        buf->size = size;

        return buf;
}

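/*
 * Importer lifecycle sketch (illustrative): the vb2 core drives the
 * DMABUF callbacks above roughly in this order:
 *
 *	priv = vb2_vmalloc_attach_dmabuf(dev, dbuf, size, dma_dir);
 *	vb2_vmalloc_map_dmabuf(priv);    // dma_buf_vmap() fills buf->vaddr
 *	... CPU access via vb2_vmalloc_vaddr(priv) ...
 *	vb2_vmalloc_unmap_dmabuf(priv);
 *	vb2_vmalloc_detach_dmabuf(priv);
 *
 * Unlike the DMA-contig allocator, this importer never calls
 * dma_buf_map_attachment(); it only needs a CPU mapping, so map_dmabuf
 * is just a dma_buf_vmap().
 */
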
const struct vb2_mem_ops vb2_vmalloc_memops = {
        .alloc          = vb2_vmalloc_alloc,
        .put            = vb2_vmalloc_put,
        .get_userptr    = vb2_vmalloc_get_userptr,
        .put_userptr    = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
        .get_dmabuf     = vb2_vmalloc_get_dmabuf,
#endif
        .map_dmabuf     = vb2_vmalloc_map_dmabuf,
        .unmap_dmabuf   = vb2_vmalloc_unmap_dmabuf,
        .attach_dmabuf  = vb2_vmalloc_attach_dmabuf,
        .detach_dmabuf  = vb2_vmalloc_detach_dmabuf,
        .vaddr          = vb2_vmalloc_vaddr,
        .mmap           = vb2_vmalloc_mmap,
        .num_users      = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

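/*
 * Driver usage sketch (illustrative, not part of this file): a V4L2
 * driver selects this allocator by pointing its vb2_queue at the ops
 * table before initializing the queue:
 *
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	ret = vb2_queue_init(q);
 *
 * The vmalloc allocator suits devices that do not DMA straight into
 * buffer memory (e.g. drivers that copy frames with the CPU), since
 * vmalloc'ed memory is not guaranteed to be physically contiguous.
 */
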
MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");