/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

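/*
 * Per-buffer state for the vmalloc allocator: the kernel mapping of the
 * buffer (vaddr), the pinned user pages of a USERPTR buffer (vec), the DMA
 * direction and size requested by the vb2 core, a refcount shared with the
 * mmap vm_area handler, and the dma-buf this buffer is attached to, if any.
 */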
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction	dma_dir;
	unsigned long			size;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
			       unsigned long size, enum dma_data_direction dma_dir,
			       gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return NULL;

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return NULL;
	}

	atomic_inc(&buf->refcount);
	return buf;
}

static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (atomic_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

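/*
 * USERPTR support: the user pages are pinned with vb2_create_framevec()
 * and given a kernel mapping.  When page structs are available the pages
 * are mapped with vm_map_ram(); otherwise the PFNs must form a physically
 * contiguous range, which is mapped with ioremap_nocache().
 */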
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec))
		goto fail_pfnvec_create;
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
				ioremap_nocache(nums[0] << PAGE_SHIFT, size);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
					PAGE_KERNEL);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);
	return NULL;
}

static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested "
		       "or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

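/*
 * MMAP support: the vmalloc'ed buffer is remapped into the user vm_area
 * with remap_vmalloc_range(), and vb2_common_vm_ops ties the buffer
 * refcount to the vma's open/close callbacks via the handler set up at
 * allocation time.
 */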
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

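/*
 * Map the attachment's scatterlist for the importing device.  A previous
 * mapping with the same direction is returned as-is; a mapping with a
 * different direction is unmapped first.  The dmabuf mutex serializes
 * concurrent map/unmap calls on the same attachment.
 */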
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.kmap = vb2_vmalloc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}

const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");

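/*
 * Usage sketch: a driver selects this allocator by pointing its vb2_queue
 * at vb2_vmalloc_memops before calling vb2_queue_init().  The queue ops
 * structure "my_drv_qops" below is a hypothetical driver object, shown
 * only to illustrate where vb2_vmalloc_memops plugs in:
 *
 *	static int my_drv_init_queue(struct vb2_queue *q, void *drv_priv)
 *	{
 *		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *		q->drv_priv = drv_priv;
 *		q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
 *		q->ops = &my_drv_qops;
 *		q->mem_ops = &vb2_vmalloc_memops;
 *		q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *		return vb2_queue_init(q);
 *	}
 */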